1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
6 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
8 #include "src/assembler.h"
9 #include "src/bailout-reason.h"
10 #include "src/frames.h"
11 #include "src/globals.h"
16 // ----------------------------------------------------------------------------
17 // Static helper functions
19 // Generate a MemOperand for loading a field from an object.
20 inline MemOperand FieldMemOperand(Register object, int offset) {
21 return MemOperand(object, offset - kHeapObjectTag);
25 // Give alias names to registers
// Register aliases for the ARM calling convention used by generated code:
// cp = r7, pp = r8, kRootRegister = r10 (per the kRegister_*_Code values).
const Register cp = { kRegister_r7_Code };  // JavaScript context pointer.
const Register pp = { kRegister_r8_Code };  // Constant pool pointer.
const Register kRootRegister = { kRegister_r10_Code };  // Roots array pointer.
30 // Flags used for AllocateHeapNumber
// Controls whether the write barrier also updates the remembered set.
enum RememberedSetAction {
  EMIT_REMEMBERED_SET,
  OMIT_REMEMBERED_SET
};
// Controls whether an inline smi check is emitted before a barrier.
enum SmiCheck {
  INLINE_SMI_CHECK,
  OMIT_SMI_CHECK
};
// Whether the stored value may itself contain interesting pointers.
// NOTE: the terminating "};" was missing in this copy of the file.
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
// Records whether lr has already been saved by the caller.
enum LinkRegisterStatus {
  kLRHasNotBeenSaved,
  kLRHasBeenSaved
};
// Returns an allocatable register that is none of the given registers
// (unused slots may be left as no_reg).
Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);
57 bool AreAliased(Register reg1,
59 Register reg3 = no_reg,
60 Register reg4 = no_reg,
61 Register reg5 = no_reg,
62 Register reg6 = no_reg,
63 Register reg7 = no_reg,
64 Register reg8 = no_reg);
// Whether a call target address may be stored inline in the code stream.
// NOTE: the terminating "};" was missing in this copy of the file.
enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};
73 // MacroAssembler implements a collection of frequently used macros.
74 class MacroAssembler: public Assembler {
76 // The isolate parameter can be NULL if the macro assembler should
77 // not use isolate-dependent functionality. In this case, it's the
78 // responsibility of the caller to never invoke such function on the
80 MacroAssembler(Isolate* isolate, void* buffer, int size);
83 // Returns the size of a call in instructions. Note, the value returned is
84 // only valid as long as no entries are added to the constant pool between
85 // checking the call size and emitting the actual call.
86 static int CallSize(Register target, Condition cond = al);
87 int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
88 int CallStubSize(CodeStub* stub,
89 TypeFeedbackId ast_id = TypeFeedbackId::None(),
91 static int CallSizeNotPredictableCodeSize(Isolate* isolate,
93 RelocInfo::Mode rmode,
96 // Jump, Call, and Ret pseudo instructions implementing inter-working.
97 void Jump(Register target, Condition cond = al);
98 void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
99 void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
100 void Call(Register target, Condition cond = al);
101 void Call(Address target, RelocInfo::Mode rmode,
103 TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
104 int CallSize(Handle<Code> code,
105 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
106 TypeFeedbackId ast_id = TypeFeedbackId::None(),
107 Condition cond = al);
108 void Call(Handle<Code> code,
109 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
110 TypeFeedbackId ast_id = TypeFeedbackId::None(),
112 TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
113 void Ret(Condition cond = al);
115 // Emit code to discard a non-negative number of pointer-sized elements
116 // from the stack, clobbering only the sp register.
117 void Drop(int count, Condition cond = al);
119 void Ret(int drop, Condition cond = al);
121 // Swap two registers. If the scratch register is omitted then a slightly
122 // less efficient form using xor instead of mov is emitted.
123 void Swap(Register reg1,
125 Register scratch = no_reg,
126 Condition cond = al);
128 void Mls(Register dst, Register src1, Register src2, Register srcA,
129 Condition cond = al);
130 void And(Register dst, Register src1, const Operand& src2,
131 Condition cond = al);
132 void Ubfx(Register dst, Register src, int lsb, int width,
133 Condition cond = al);
134 void Sbfx(Register dst, Register src, int lsb, int width,
135 Condition cond = al);
136 // The scratch register is not used for ARMv7.
137 // scratch can be the same register as src (in which case it is trashed), but
138 // not the same as dst.
139 void Bfi(Register dst,
144 Condition cond = al);
145 void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
146 void Usat(Register dst, int satpos, const Operand& src,
147 Condition cond = al);
149 void Call(Label* target);
150 void Push(Register src) { push(src); }
151 void Pop(Register dst) { pop(dst); }
153 // Register move. May do nothing if the registers are identical.
154 void Move(Register dst, Handle<Object> value);
155 void Move(Register dst, Register src, Condition cond = al);
156 void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
157 Condition cond = al) {
158 if (!src.is_reg() || !src.rm().is(dst) || sbit != LeaveCC) {
159 mov(dst, src, sbit, cond);
162 void Move(DwVfpRegister dst, DwVfpRegister src);
164 void Load(Register dst, const MemOperand& src, Representation r);
165 void Store(Register src, const MemOperand& dst, Representation r);
167 // Load an object from the root table.
168 void LoadRoot(Register destination,
169 Heap::RootListIndex index,
170 Condition cond = al);
171 // Store an object to the root table.
172 void StoreRoot(Register source,
173 Heap::RootListIndex index,
174 Condition cond = al);
176 // ---------------------------------------------------------------------------
179 void IncrementalMarkingRecordWriteHelper(Register object,
183 enum RememberedSetFinalAction {
188 // Record in the remembered set the fact that we have a pointer to new space
189 // at the address pointed to by the addr register. Only works if addr is not
191 void RememberedSetHelper(Register object, // Used for debug code.
194 SaveFPRegsMode save_fp,
195 RememberedSetFinalAction and_then);
197 void CheckPageFlag(Register object,
201 Label* condition_met);
203 // Check if object is in new space. Jumps if the object is not in new space.
204 // The register scratch can be object itself, but scratch will be clobbered.
205 void JumpIfNotInNewSpace(Register object,
208 InNewSpace(object, scratch, ne, branch);
211 // Check if object is in new space. Jumps if the object is in new space.
212 // The register scratch can be object itself, but it will be clobbered.
213 void JumpIfInNewSpace(Register object,
216 InNewSpace(object, scratch, eq, branch);
219 // Check if an object has a given incremental marking color.
220 void HasColor(Register object,
227 void JumpIfBlack(Register object,
232 // Checks the color of an object. If the object is already grey or black
233 // then we just fall through, since it is already live. If it is white and
234 // we can determine that it doesn't need to be scanned, then we just mark it
235 // black and fall through. For the rest we jump to the label so the
236 // incremental marker can fix its assumptions.
237 void EnsureNotWhite(Register object,
241 Label* object_is_white_and_not_data);
243 // Detects conservatively whether an object is data-only, i.e. it does need to
244 // be scanned by the garbage collector.
245 void JumpIfDataObject(Register value,
247 Label* not_data_object);
249 // Notify the garbage collector that we wrote a pointer into an object.
250 // |object| is the object being stored into, |value| is the object being
251 // stored. value and scratch registers are clobbered by the operation.
252 // The offset is the offset from the start of the object, not the offset from
253 // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
254 void RecordWriteField(
259 LinkRegisterStatus lr_status,
260 SaveFPRegsMode save_fp,
261 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
262 SmiCheck smi_check = INLINE_SMI_CHECK,
263 PointersToHereCheck pointers_to_here_check_for_value =
264 kPointersToHereMaybeInteresting);
266 // As above, but the offset has the tag presubtracted. For use with
267 // MemOperand(reg, off).
268 inline void RecordWriteContextSlot(
273 LinkRegisterStatus lr_status,
274 SaveFPRegsMode save_fp,
275 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
276 SmiCheck smi_check = INLINE_SMI_CHECK,
277 PointersToHereCheck pointers_to_here_check_for_value =
278 kPointersToHereMaybeInteresting) {
279 RecordWriteField(context,
280 offset + kHeapObjectTag,
285 remembered_set_action,
287 pointers_to_here_check_for_value);
290 void RecordWriteForMap(
294 LinkRegisterStatus lr_status,
295 SaveFPRegsMode save_fp);
297 // For a given |object| notify the garbage collector that the slot |address|
298 // has been written. |value| is the object being stored. The value and
299 // address registers are clobbered by the operation.
304 LinkRegisterStatus lr_status,
305 SaveFPRegsMode save_fp,
306 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
307 SmiCheck smi_check = INLINE_SMI_CHECK,
308 PointersToHereCheck pointers_to_here_check_for_value =
309 kPointersToHereMaybeInteresting);
312 void Push(Handle<Object> handle);
313 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
315 // Push two registers. Pushes leftmost register first (to highest address).
316 void Push(Register src1, Register src2, Condition cond = al) {
317 DCHECK(!src1.is(src2));
318 if (src1.code() > src2.code()) {
319 stm(db_w, sp, src1.bit() | src2.bit(), cond);
321 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
322 str(src2, MemOperand(sp, 4, NegPreIndex), cond);
326 // Push three registers. Pushes leftmost register first (to highest address).
327 void Push(Register src1, Register src2, Register src3, Condition cond = al) {
328 DCHECK(!src1.is(src2));
329 DCHECK(!src2.is(src3));
330 DCHECK(!src1.is(src3));
331 if (src1.code() > src2.code()) {
332 if (src2.code() > src3.code()) {
333 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
335 stm(db_w, sp, src1.bit() | src2.bit(), cond);
336 str(src3, MemOperand(sp, 4, NegPreIndex), cond);
339 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
340 Push(src2, src3, cond);
344 // Push four registers. Pushes leftmost register first (to highest address).
345 void Push(Register src1,
349 Condition cond = al) {
350 DCHECK(!src1.is(src2));
351 DCHECK(!src2.is(src3));
352 DCHECK(!src1.is(src3));
353 DCHECK(!src1.is(src4));
354 DCHECK(!src2.is(src4));
355 DCHECK(!src3.is(src4));
356 if (src1.code() > src2.code()) {
357 if (src2.code() > src3.code()) {
358 if (src3.code() > src4.code()) {
361 src1.bit() | src2.bit() | src3.bit() | src4.bit(),
364 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
365 str(src4, MemOperand(sp, 4, NegPreIndex), cond);
368 stm(db_w, sp, src1.bit() | src2.bit(), cond);
369 Push(src3, src4, cond);
372 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
373 Push(src2, src3, src4, cond);
377 // Pop two registers. Pops rightmost register first (from lower address).
378 void Pop(Register src1, Register src2, Condition cond = al) {
379 DCHECK(!src1.is(src2));
380 if (src1.code() > src2.code()) {
381 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
383 ldr(src2, MemOperand(sp, 4, PostIndex), cond);
384 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
388 // Pop three registers. Pops rightmost register first (from lower address).
389 void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
390 DCHECK(!src1.is(src2));
391 DCHECK(!src2.is(src3));
392 DCHECK(!src1.is(src3));
393 if (src1.code() > src2.code()) {
394 if (src2.code() > src3.code()) {
395 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
397 ldr(src3, MemOperand(sp, 4, PostIndex), cond);
398 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
401 Pop(src2, src3, cond);
402 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
406 // Pop four registers. Pops rightmost register first (from lower address).
407 void Pop(Register src1,
411 Condition cond = al) {
412 DCHECK(!src1.is(src2));
413 DCHECK(!src2.is(src3));
414 DCHECK(!src1.is(src3));
415 DCHECK(!src1.is(src4));
416 DCHECK(!src2.is(src4));
417 DCHECK(!src3.is(src4));
418 if (src1.code() > src2.code()) {
419 if (src2.code() > src3.code()) {
420 if (src3.code() > src4.code()) {
423 src1.bit() | src2.bit() | src3.bit() | src4.bit(),
426 ldr(src4, MemOperand(sp, 4, PostIndex), cond);
427 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
430 Pop(src3, src4, cond);
431 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
434 Pop(src2, src3, src4, cond);
435 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
439 // Push a fixed frame, consisting of lr, fp, constant pool (if
440 // FLAG_enable_ool_constant_pool), context and JS function / marker id if
441 // marker_reg is a valid register.
442 void PushFixedFrame(Register marker_reg = no_reg);
443 void PopFixedFrame(Register marker_reg = no_reg);
445 // Push and pop the registers that can hold pointers, as defined by the
446 // RegList constant kSafepointSavedRegisters.
447 void PushSafepointRegisters();
448 void PopSafepointRegisters();
449 // Store value in register src in the safepoint stack slot for
451 void StoreToSafepointRegisterSlot(Register src, Register dst);
452 // Load the value of the src register from its safepoint stack slot
453 // into register dst.
454 void LoadFromSafepointRegisterSlot(Register dst, Register src);
456 // Load two consecutive registers with two consecutive memory locations.
457 void Ldrd(Register dst1,
459 const MemOperand& src,
460 Condition cond = al);
462 // Store two consecutive registers to two consecutive memory locations.
463 void Strd(Register src1,
465 const MemOperand& dst,
466 Condition cond = al);
468 // Ensure that FPSCR contains values needed by JavaScript.
469 // We need the NaNModeControlBit to be sure that operations like
470 // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
471 // In VFP3 it will be always the Canonical NaN.
472 // In VFP2 it will be either the Canonical NaN or the negative version
473 // of the Canonical NaN. It doesn't matter if we have two values. The aim
474 // is to be sure to never generate the hole NaN.
475 void VFPEnsureFPSCRState(Register scratch);
477 // If the value is a NaN, canonicalize the value else, do nothing.
478 void VFPCanonicalizeNaN(const DwVfpRegister dst,
479 const DwVfpRegister src,
480 const Condition cond = al);
481 void VFPCanonicalizeNaN(const DwVfpRegister value,
482 const Condition cond = al) {
483 VFPCanonicalizeNaN(value, value, cond);
486 // Compare double values and move the result to the normal condition flags.
487 void VFPCompareAndSetFlags(const DwVfpRegister src1,
488 const DwVfpRegister src2,
489 const Condition cond = al);
490 void VFPCompareAndSetFlags(const DwVfpRegister src1,
492 const Condition cond = al);
494 // Compare double values and then load the fpscr flags to a register.
495 void VFPCompareAndLoadFlags(const DwVfpRegister src1,
496 const DwVfpRegister src2,
497 const Register fpscr_flags,
498 const Condition cond = al);
499 void VFPCompareAndLoadFlags(const DwVfpRegister src1,
501 const Register fpscr_flags,
502 const Condition cond = al);
504 void Vmov(const DwVfpRegister dst,
506 const Register scratch = no_reg);
508 void VmovHigh(Register dst, DwVfpRegister src);
509 void VmovHigh(DwVfpRegister dst, Register src);
510 void VmovLow(Register dst, DwVfpRegister src);
511 void VmovLow(DwVfpRegister dst, Register src);
513 // Loads the number from object into dst register.
514 // If |object| is neither smi nor heap number, |not_number| is jumped to
515 // with |object| still intact.
516 void LoadNumber(Register object,
517 LowDwVfpRegister dst,
518 Register heap_number_map,
522 // Loads the number from object into double_dst in the double format.
523 // Control will jump to not_int32 if the value cannot be exactly represented
524 // by a 32-bit integer.
525 // Floating point value in the 32-bit integer range that are not exact integer
527 void LoadNumberAsInt32Double(Register object,
528 DwVfpRegister double_dst,
529 Register heap_number_map,
531 LowDwVfpRegister double_scratch,
534 // Loads the number from object into dst as a 32-bit integer.
535 // Control will jump to not_int32 if the object cannot be exactly represented
536 // by a 32-bit integer.
537 // Floating point value in the 32-bit integer range that are not exact integer
538 // won't be converted.
539 void LoadNumberAsInt32(Register object,
541 Register heap_number_map,
543 DwVfpRegister double_scratch0,
544 LowDwVfpRegister double_scratch1,
547 // Generates function and stub prologue code.
549 void Prologue(bool code_pre_aging);
552 // stack_space - extra stack space, used for alignment before call to C.
553 void EnterExitFrame(bool save_doubles, int stack_space = 0);
555 // Leave the current exit frame. Expects the return value in r0.
556 // Expect the number of values, pushed prior to the exit frame, to
557 // remove in a register (or no_reg, if there is nothing to remove).
558 void LeaveExitFrame(bool save_doubles, Register argument_count,
559 bool restore_context,
560 bool argument_count_is_length = false);
562 // Get the actual activation frame alignment for target environment.
563 static int ActivationFrameAlignment();
565 void LoadContext(Register dst, int context_chain_length);
567 // Conditionally load the cached Array transitioned map of type
568 // transitioned_kind from the native context if the map in register
569 // map_in_out is the cached Array map in the native context of
571 void LoadTransitionedArrayMapConditional(
572 ElementsKind expected_kind,
573 ElementsKind transitioned_kind,
576 Label* no_map_match);
578 void LoadGlobalFunction(int index, Register function);
580 // Load the initial map from the global function. The registers
581 // function and map can be the same, function is then overwritten.
582 void LoadGlobalFunctionInitialMap(Register function,
586 void InitializeRootRegister() {
587 ExternalReference roots_array_start =
588 ExternalReference::roots_array_start(isolate());
589 mov(kRootRegister, Operand(roots_array_start));
592 // ---------------------------------------------------------------------------
593 // JavaScript invokes
595 // Invoke the JavaScript function code by either calling or jumping.
596 void InvokeCode(Register code,
597 const ParameterCount& expected,
598 const ParameterCount& actual,
600 const CallWrapper& call_wrapper);
602 // Invoke the JavaScript function in the given register. Changes the
603 // current context to the context in the function before invoking.
604 void InvokeFunction(Register function,
605 const ParameterCount& actual,
607 const CallWrapper& call_wrapper);
609 void InvokeFunction(Register function,
610 const ParameterCount& expected,
611 const ParameterCount& actual,
613 const CallWrapper& call_wrapper);
615 void InvokeFunction(Handle<JSFunction> function,
616 const ParameterCount& expected,
617 const ParameterCount& actual,
619 const CallWrapper& call_wrapper);
621 void IsObjectJSObjectType(Register heap_object,
626 void IsInstanceJSObjectType(Register map,
630 void IsObjectJSStringType(Register object,
634 void IsObjectNameType(Register object,
638 // ---------------------------------------------------------------------------
643 // ---------------------------------------------------------------------------
644 // Exception handling
646 // Push a new try handler and link into try handler chain.
647 void PushTryHandler(StackHandler::Kind kind, int handler_index);
649 // Unlink the stack handler on top of the stack from the try handler chain.
650 // Must preserve the result register.
651 void PopTryHandler();
653 // Passes thrown value to the handler of top of the try handler chain.
654 void Throw(Register value);
656 // Propagates an uncatchable exception to the top of the current JS stack's
658 void ThrowUncatchable(Register value);
660 // ---------------------------------------------------------------------------
661 // Inline caching support
663 // Generate code for checking access rights - used for security checks
664 // on access to global objects across environments. The holder register
665 // is left untouched, whereas both scratch registers are clobbered.
666 void CheckAccessGlobalProxy(Register holder_reg,
670 void GetNumberHash(Register t0, Register scratch);
672 void LoadFromNumberDictionary(Label* miss,
681 inline void MarkCode(NopMarkerTypes type) {
685 // Check if the given instruction is a 'type' marker.
686 // i.e. check if is is a mov r<type>, r<type> (referenced as nop(type))
687 // These instructions are generated to mark special location in the code,
688 // like some special IC code.
689 static inline bool IsMarkedCode(Instr instr, int type) {
690 DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
691 return IsNop(instr, type);
695 static inline int GetCodeMarker(Instr instr) {
696 int dst_reg_offset = 12;
697 int dst_mask = 0xf << dst_reg_offset;
699 int dst_reg = (instr & dst_mask) >> dst_reg_offset;
700 int src_reg = instr & src_mask;
701 uint32_t non_register_mask = ~(dst_mask | src_mask);
702 uint32_t mov_mask = al | 13 << 21;
704 // Return <n> if we have a mov rn rn, else return -1.
705 int type = ((instr & non_register_mask) == mov_mask) &&
706 (dst_reg == src_reg) &&
707 (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
710 DCHECK((type == -1) ||
711 ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
716 // ---------------------------------------------------------------------------
717 // Allocation support
719 // Allocate an object in new space or old pointer space. The object_size is
720 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
721 // is passed. If the space is exhausted control continues at the gc_required
722 // label. The allocated object is returned in result. If the flag
723 // tag_allocated_object is true the result is tagged as as a heap object.
724 // All registers are clobbered also when control continues at the gc_required
726 void Allocate(int object_size,
731 AllocationFlags flags);
733 void Allocate(Register object_size,
738 AllocationFlags flags);
740 // Undo allocation in new space. The object passed and objects allocated after
741 // it will no longer be allocated. The caller must make sure that no pointers
742 // are left to the object(s) no longer allocated as they would be invalid when
743 // allocation is undone.
744 void UndoAllocationInNewSpace(Register object, Register scratch);
747 void AllocateTwoByteString(Register result,
753 void AllocateOneByteString(Register result, Register length,
754 Register scratch1, Register scratch2,
755 Register scratch3, Label* gc_required);
756 void AllocateTwoByteConsString(Register result,
761 void AllocateOneByteConsString(Register result, Register length,
762 Register scratch1, Register scratch2,
764 void AllocateTwoByteSlicedString(Register result,
769 void AllocateOneByteSlicedString(Register result, Register length,
770 Register scratch1, Register scratch2,
773 // Allocates a heap number or jumps to the gc_required label if the young
774 // space is full and a scavenge is needed. All registers are clobbered also
775 // when control continues at the gc_required label.
776 void AllocateHeapNumber(Register result,
779 Register heap_number_map,
781 TaggingMode tagging_mode = TAG_RESULT,
782 MutableMode mode = IMMUTABLE);
783 void AllocateHeapNumberWithValue(Register result,
787 Register heap_number_map,
790 // Copies a fixed number of fields of heap objects from src to dst.
791 void CopyFields(Register dst,
793 LowDwVfpRegister double_scratch,
796 // Copies a number of bytes from src to dst. All registers are clobbered. On
797 // exit src and dst will point to the place just after where the last byte was
798 // read or written and length will be zero.
799 void CopyBytes(Register src,
804 // Initialize fields with filler values. Fields starting at |start_offset|
805 // not including end_offset are overwritten with the value in |filler|. At
806 // the end the loop, |start_offset| takes the value of |end_offset|.
807 void InitializeFieldsWithFiller(Register start_offset,
811 // ---------------------------------------------------------------------------
812 // Support functions.
814 // Try to get function prototype of a function and puts the value in
815 // the result register. Checks that the function really is a
816 // function and jumps to the miss label if the fast checks fail. The
817 // function register will be untouched; the other registers may be
819 void TryGetFunctionPrototype(Register function,
823 bool miss_on_bound_function = false);
825 // Compare object type for heap object. heap_object contains a non-Smi
826 // whose object type should be compared with the given type. This both
827 // sets the flags and leaves the object type in the type_reg register.
828 // It leaves the map in the map register (unless the type_reg and map register
829 // are the same register). It leaves the heap object in the heap_object
830 // register unless the heap_object register is the same register as one of the
832 // Type_reg can be no_reg. In that case ip is used.
833 void CompareObjectType(Register heap_object,
838 // Compare object type for heap object. Branch to false_label if type
839 // is lower than min_type or greater than max_type.
840 // Load map into the register map.
841 void CheckObjectTypeRange(Register heap_object,
843 InstanceType min_type,
844 InstanceType max_type,
847 // Compare instance type in a map. map contains a valid map object whose
848 // object type should be compared with the given type. This both
849 // sets the flags and leaves the object type in the type_reg register.
850 void CompareInstanceType(Register map,
855 // Check if a map for a JSObject indicates that the object has fast elements.
856 // Jump to the specified label if it does not.
857 void CheckFastElements(Register map,
861 // Check if a map for a JSObject indicates that the object can have both smi
862 // and HeapObject elements. Jump to the specified label if it does not.
863 void CheckFastObjectElements(Register map,
867 // Check if a map for a JSObject indicates that the object has fast smi only
868 // elements. Jump to the specified label if it does not.
869 void CheckFastSmiElements(Register map,
873 // Check to see if maybe_number can be stored as a double in
874 // FastDoubleElements. If it can, store it at the index specified by key in
875 // the FastDoubleElements array elements. Otherwise jump to fail.
876 void StoreNumberToDoubleElements(Register value_reg,
878 Register elements_reg,
880 LowDwVfpRegister double_scratch,
882 int elements_offset = 0);
884 // Compare an object's map with the specified map and its transitioned
885 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
886 // set with result of map compare. If multiple map compares are required, the
887 // compare sequences branches to early_success.
888 void CompareMap(Register obj,
891 Label* early_success);
893 // As above, but the map of the object is already loaded into the register
894 // which is preserved by the code generated.
895 void CompareMap(Register obj_map,
897 Label* early_success);
899 // Check if the map of an object is equal to a specified map and branch to
900 // label if not. Skip the smi check if not required (object is known to be a
901 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
902 // against maps that are ElementsKind transition maps of the specified map.
903 void CheckMap(Register obj,
907 SmiCheckType smi_check_type);
910 void CheckMap(Register obj,
912 Heap::RootListIndex index,
914 SmiCheckType smi_check_type);
917 // Check if the map of an object is equal to a specified weak map and branch
918 // to a specified target if equal. Skip the smi check if not required
919 // (object is known to be a heap object)
920 void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
921 Handle<WeakCell> cell, Handle<Code> success,
922 SmiCheckType smi_check_type);
924 // Compare the given value and the value of weak cell.
925 void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
927 void GetWeakValue(Register value, Handle<WeakCell> cell);
929 // Load the value of the weak cell in the value register. Branch to the given
930 // miss label if the weak cell was cleared.
931 void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
933 // Compare the object in a register to a value from the root list.
934 // Uses the ip register as scratch.
935 void CompareRoot(Register obj, Heap::RootListIndex index);
938 // Load and check the instance type of an object for being a string.
939 // Loads the type into the second argument register.
940 // Returns a condition that will be enabled if the object was a string
941 // and the passed-in condition passed. If the passed-in condition failed
942 // then flags remain unchanged.
943 Condition IsObjectStringType(Register obj,
945 Condition cond = al) {
946 ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
947 ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
948 tst(type, Operand(kIsNotStringMask), cond);
949 DCHECK_EQ(0u, kStringTag);
954 // Picks out an array index from the hash field.
956 // hash - holds the index's hash. Clobbered.
957 // index - holds the overwritten index on exit.
958 void IndexFromHash(Register hash, Register index);
960 // Get the number of least significant bits from a register
961 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
962 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
964 // Load the value of a smi object into a double register.
965 // The register value must be between d0 and d15.
966 void SmiToDouble(LowDwVfpRegister value, Register smi);
968 // Check if a double can be exactly represented as a signed 32-bit integer.
969 // Z flag set to one if true.
970 void TestDoubleIsInt32(DwVfpRegister double_input,
971 LowDwVfpRegister double_scratch);
973 // Try to convert a double to a signed 32-bit integer.
974 // Z flag set to one and result assigned if the conversion is exact.
975 void TryDoubleToInt32Exact(Register result,
976 DwVfpRegister double_input,
977 LowDwVfpRegister double_scratch);
979 // Floor a double and writes the value to the result register.
980 // Go to exact if the conversion is exact (to be able to test -0),
981 // fall through calling code if an overflow occurred, else go to done.
982 // In return, input_high is loaded with high bits of input.
983 void TryInt32Floor(Register result,
984 DwVfpRegister double_input,
986 LowDwVfpRegister double_scratch,
990 // Performs a truncating conversion of a floating point number as used by
991 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
992 // succeeds, otherwise falls through if result is saturated. On return
993 // 'result' either holds answer, or is clobbered on fall through.
995 // Only public for the test code in test-code-stubs-arm.cc.
996 void TryInlineTruncateDoubleToI(Register result,
1000 // Performs a truncating conversion of a floating point number as used by
1001 // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
1002 // Exits with 'result' holding the answer.
1003 void TruncateDoubleToI(Register result, DwVfpRegister double_input);
1005 // Performs a truncating conversion of a heap number as used by
1006 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
1007 // must be different registers. Exits with 'result' holding the answer.
1008 void TruncateHeapNumberToI(Register result, Register object);
1010 // Converts the smi or heap number in object to an int32 using the rules
1011 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
1012 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
1013 // different registers.
1014 void TruncateNumberToI(Register object,
1016 Register heap_number_map,
1020 // Check whether d16-d31 are available on the CPU. The result is given by the
1021 // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
1022 void CheckFor32DRegs(Register scratch);
1024 // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
1025 // values to location, saving [d0..(d15|d31)].
1026 void SaveFPRegs(Register location, Register scratch);
1028 // Does a runtime check for 16/32 FP registers. Either way, pops 32 double
1029 // values to location, restoring [d0..(d15|d31)].
1030 void RestoreFPRegs(Register location, Register scratch);
1032 // ---------------------------------------------------------------------------
1035 // Call a code stub.
1036 void CallStub(CodeStub* stub,
1037 TypeFeedbackId ast_id = TypeFeedbackId::None(),
1038 Condition cond = al);
1040 // Call a code stub.
1041 void TailCallStub(CodeStub* stub, Condition cond = al);
1043 // Call a runtime routine.
1044 void CallRuntime(const Runtime::Function* f,
1046 SaveFPRegsMode save_doubles = kDontSaveFPRegs);
1047 void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
1048 const Runtime::Function* function = Runtime::FunctionForId(id);
1049 CallRuntime(function, function->nargs, kSaveFPRegs);
1052 // Convenience function: Same as above, but takes the fid instead.
1053 void CallRuntime(Runtime::FunctionId id,
1055 SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1056 CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
1059 // Convenience function: call an external reference.
1060 void CallExternalReference(const ExternalReference& ext,
1063 // Tail call of a runtime routine (jump).
1064 // Like JumpToExternalReference, but also takes care of passing the number
1066 void TailCallExternalReference(const ExternalReference& ext,
1070 // Convenience function: tail call a runtime routine (jump).
1071 void TailCallRuntime(Runtime::FunctionId fid,
1075 int CalculateStackPassedWords(int num_reg_arguments,
1076 int num_double_arguments);
1078 // Before calling a C-function from generated code, align arguments on stack.
1079 // After aligning the frame, non-register arguments must be stored in
1080 // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
1081 // are word sized. If double arguments are used, this function assumes that
1082 // all double arguments are stored before core registers; otherwise the
1083 // correct alignment of the double values is not guaranteed.
1084 // Some compilers/platforms require the stack to be aligned when calling
1086 // Needs a scratch register to do some arithmetic. This register will be
1088 void PrepareCallCFunction(int num_reg_arguments,
1089 int num_double_registers,
1091 void PrepareCallCFunction(int num_reg_arguments,
1094 // There are two ways of passing double arguments on ARM, depending on
1095 // whether soft or hard floating point ABI is used. These functions
1096 // abstract parameter passing for the three different ways we call
1097 // C functions from generated code.
1098 void MovToFloatParameter(DwVfpRegister src);
1099 void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2);
1100 void MovToFloatResult(DwVfpRegister src);
1102 // Calls a C function and cleans up the space for arguments allocated
1103 // by PrepareCallCFunction. The called function is not allowed to trigger a
1104 // garbage collection, since that might move the code and invalidate the
1105 // return address (unless this is somehow accounted for by the called
1107 void CallCFunction(ExternalReference function, int num_arguments);
1108 void CallCFunction(Register function, int num_arguments);
1109 void CallCFunction(ExternalReference function,
1110 int num_reg_arguments,
1111 int num_double_arguments);
1112 void CallCFunction(Register function,
1113 int num_reg_arguments,
1114 int num_double_arguments);
1116 void MovFromFloatParameter(DwVfpRegister dst);
1117 void MovFromFloatResult(DwVfpRegister dst);
1119 // Jump to a runtime routine.
1120 void JumpToExternalReference(const ExternalReference& builtin);
1122 // Invoke specified builtin JavaScript function. Adds an entry to
1123 // the unresolved list if the name does not resolve.
1124 void InvokeBuiltin(Builtins::JavaScript id,
1126 const CallWrapper& call_wrapper = NullCallWrapper());
1128 // Store the code object for the given builtin in the target register and
1129 // setup the function in r1.
1130 void GetBuiltinEntry(Register target, Builtins::JavaScript id);
1132 // Store the function for the given builtin in the target register.
1133 void GetBuiltinFunction(Register target, Builtins::JavaScript id);
1135 Handle<Object> CodeObject() {
1136 DCHECK(!code_object_.is_null());
1137 return code_object_;
1141 // Emit code for a truncating division by a constant. The dividend register is
1142 // unchanged and ip gets clobbered. Dividend and result must be different.
1143 void TruncatingDiv(Register result, Register dividend, int32_t divisor);
1145 // ---------------------------------------------------------------------------
1146 // StatsCounter support
1148 void SetCounter(StatsCounter* counter, int value,
1149 Register scratch1, Register scratch2);
1150 void IncrementCounter(StatsCounter* counter, int value,
1151 Register scratch1, Register scratch2);
1152 void DecrementCounter(StatsCounter* counter, int value,
1153 Register scratch1, Register scratch2);
1156 // ---------------------------------------------------------------------------
1159 // Calls Abort(msg) if the condition cond is not satisfied.
1160 // Use --debug_code to enable.
1161 void Assert(Condition cond, BailoutReason reason);
1162 void AssertFastElements(Register elements);
1164 // Like Assert(), but always enabled.
1165 void Check(Condition cond, BailoutReason reason);
1167 // Print a message to stdout and abort execution.
1168 void Abort(BailoutReason msg);
1170 // Verify restrictions about code generated in stubs.
1171 void set_generating_stub(bool value) { generating_stub_ = value; }
1172 bool generating_stub() { return generating_stub_; }
1173 void set_has_frame(bool value) { has_frame_ = value; }
1174 bool has_frame() { return has_frame_; }
1175 inline bool AllowThisStubCall(CodeStub* stub);
1177 // EABI variant for double arguments in use.
1178 bool use_eabi_hardfloat() {
1180 return base::OS::ArmUsingHardFloat();
1181 #elif USE_EABI_HARDFLOAT
1188 // ---------------------------------------------------------------------------
1191 // Check whether the value of reg is a power of two and not zero. If not
1192 // control continues at the label not_power_of_two. If reg is a power of two
1193 // the register scratch contains the value of (reg - 1) when control falls
1195 void JumpIfNotPowerOfTwoOrZero(Register reg,
1197 Label* not_power_of_two_or_zero);
1198 // Check whether the value of reg is a power of two and not zero.
1199 // Control falls through if it is, with scratch containing the mask
1201 // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
1202 // zero or negative, or jumps to the 'not_power_of_two' label if the value is
1203 // strictly positive but not a power of two.
1204 void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
1206 Label* zero_and_neg,
1207 Label* not_power_of_two);
1209 // ---------------------------------------------------------------------------
// Tag |reg| as a smi in place (reg = reg + reg, i.e. reg << 1, valid since
// kSmiTag == 0). ADD is used instead of a shift so that s == SetCC also sets
// the V flag on signed overflow, which TrySmiTag relies on.
void SmiTag(Register reg, SBit s = LeaveCC) {
  add(reg, reg, Operand(reg), s);
// Tag the value in |src| as a smi and write the result to |dst|
// (dst = src + src).
void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
  add(dst, src, Operand(src), s);
// Try to convert int32 to smi. If the value is too large, preserve
1220 // the original value and jump to not_a_smi. Destroys scratch and
// Convenience overload: tag |reg| in place, jumping to |not_a_smi| with the
// original value preserved if it does not fit in a smi.
void TrySmiTag(Register reg, Label* not_a_smi) {
  TrySmiTag(reg, reg, not_a_smi);
void TrySmiTag(Register reg, Register src, Label* not_a_smi) {
  // Tag into ip (scratch, clobbered) with SetCC so the V flag reports
  // overflow; the overflow branch to not_a_smi follows.
  SmiTag(ip, src, SetCC);
// Remove the smi tag from |reg| in place via Operand::SmiUntag.
void SmiUntag(Register reg, SBit s = LeaveCC) {
  mov(reg, Operand::SmiUntag(reg), s);
// Remove the smi tag from |src|, writing the untagged value to |dst|.
void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
  mov(dst, Operand::SmiUntag(src), s);
1239 // Untag the source value into destination and jump if source is a smi.
// Source and destination can be the same register.
1241 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1243 // Untag the source value into destination and jump if source is not a smi.
// Source and destination can be the same register.
1245 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
// Test if the register contains a smi (Z == 1 (eq) if true).
inline void SmiTst(Register value) {
  // AND with the tag mask: result is 0 (Z set, eq) exactly when the tag bit
  // is clear, i.e. when value is a smi (kSmiTag == 0).
  tst(value, Operand(kSmiTagMask));
inline void NonNegativeSmiTst(Register value) {
  // eq iff both the tag bit and the sign bit are clear: a non-negative smi.
  tst(value, Operand(kSmiTagMask | kSmiSignMask));
1254 // Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
  // Sets eq iff value is a smi; the eq-conditional branch to smi_label
  // follows (elided here).
  tst(value, Operand(kSmiTagMask));
// Jump if the register contains a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
  // ne (tag bit set) means value is not a smi.
  tst(value, Operand(kSmiTagMask));
  b(ne, not_smi_label);
1264 // Jump if either of the registers contain a non-smi.
1265 void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1266 // Jump if either of the registers contain a smi.
1267 void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1269 // Abort execution if argument is a smi, enabled via --debug-code.
1270 void AssertNotSmi(Register object);
1271 void AssertSmi(Register object);
1273 // Abort execution if argument is not a string, enabled via --debug-code.
1274 void AssertString(Register object);
1276 // Abort execution if argument is not a name, enabled via --debug-code.
1277 void AssertName(Register object);
1279 // Abort execution if argument is not undefined or an AllocationSite, enabled
1280 // via --debug-code.
1281 void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1283 // Abort execution if reg is not the root value with the given index,
1284 // enabled via --debug-code.
1285 void AssertIsRoot(Register reg, Heap::RootListIndex index);
1287 // ---------------------------------------------------------------------------
1288 // HeapNumber utilities
1290 void JumpIfNotHeapNumber(Register object,
1291 Register heap_number_map,
1293 Label* on_not_heap_number);
1295 // ---------------------------------------------------------------------------
1298 // Generate code to do a lookup in the number string cache. If the number in
1299 // the register object is found in the cache the generated code falls through
1300 // with the result in the result register. The object and the result register
1301 // can be the same. If the number is not found in the cache the code jumps to
1302 // the label not_found with only the content of register object unchanged.
1303 void LookupNumberStringCache(Register object,
1310 // Checks if both objects are sequential one-byte strings and jumps to label
1311 // if either is not. Assumes that neither object is a smi.
1312 void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
1318 // Checks if both objects are sequential one-byte strings and jumps to label
1319 // if either is not.
1320 void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
1323 Label* not_flat_one_byte_strings);
1325 // Checks if both instance types are sequential one-byte strings and jumps to
1326 // label if either is not.
1327 void JumpIfBothInstanceTypesAreNotSequentialOneByte(
1328 Register first_object_instance_type, Register second_object_instance_type,
1329 Register scratch1, Register scratch2, Label* failure);
1331 // Check if instance type is sequential one-byte string and jump to label if
1333 void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
1336 void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
1338 void EmitSeqStringSetCharCheck(Register string,
1341 uint32_t encoding_mask);
1343 // ---------------------------------------------------------------------------
1344 // Patching helpers.
1346 // Get the location of a relocated constant (its address in the constant pool)
1347 // from its load site.
1348 void GetRelocatedValueLocation(Register ldr_location, Register result,
1352 void ClampUint8(Register output_reg, Register input_reg);
1354 void ClampDoubleToUint8(Register result_reg,
1355 DwVfpRegister input_reg,
1356 LowDwVfpRegister double_scratch);
1359 void LoadInstanceDescriptors(Register map, Register descriptors);
1360 void EnumLength(Register dst, Register map);
1361 void NumberOfOwnDescriptors(Register dst, Register map);
1362 void LoadAccessor(Register dst, Register holder, int accessor_index,
1363 AccessorComponent accessor);
// Extract the raw (unshifted, untagged) Field value from |src| into |dst|.
template<typename Field>
void DecodeField(Register dst, Register src) {
  Ubfx(dst, src, Field::kShift, Field::kSize);
// In-place variant: decode Field from |reg| into |reg|.
template<typename Field>
void DecodeField(Register reg) {
  DecodeField<Field>(reg, reg);
1375 template<typename Field>
1376 void DecodeFieldToSmi(Register dst, Register src) {
1377 static const int shift = Field::kShift;
1378 static const int mask = Field::kMask >> shift << kSmiTagSize;
1379 STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
1380 STATIC_ASSERT(kSmiTag == 0);
1381 if (shift < kSmiTagSize) {
1382 mov(dst, Operand(src, LSL, kSmiTagSize - shift));
1383 and_(dst, dst, Operand(mask));
1384 } else if (shift > kSmiTagSize) {
1385 mov(dst, Operand(src, LSR, shift - kSmiTagSize));
1386 and_(dst, dst, Operand(mask));
1388 and_(dst, src, Operand(mask));
1392 template<typename Field>
1393 void DecodeFieldToSmi(Register reg) {
1394 DecodeField<Field>(reg, reg);
1397 // Activation support.
1398 void EnterFrame(StackFrame::Type type,
1399 bool load_constant_pool_pointer_reg = false);
1400 // Returns the pc offset at which the frame ends.
1401 int LeaveFrame(StackFrame::Type type);
1403 // Expects object in r0 and returns map with validated enum cache
1404 // in r0. Assumes that any other register can be used as a scratch.
1405 void CheckEnumCache(Register null_value, Label* call_runtime);
1407 // AllocationMemento support. Arrays may have an associated
1408 // AllocationMemento object that can be checked for in order to pretransition
1410 // On entry, receiver_reg should point to the array object.
1411 // scratch_reg gets clobbered.
1412 // If allocation info is present, condition flags are set to eq.
1413 void TestJSArrayForAllocationMemento(Register receiver_reg,
1414 Register scratch_reg,
1415 Label* no_memento_found);
1417 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
1418 Register scratch_reg,
1419 Label* memento_found) {
1420 Label no_memento_found;
1421 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
1423 b(eq, memento_found);
1424 bind(&no_memento_found);
1427 // Jumps to found label if a prototype map has dictionary elements.
1428 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
1429 Register scratch1, Label* found);
1432 void CallCFunctionHelper(Register function,
1433 int num_reg_arguments,
1434 int num_double_arguments);
1436 void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
1438 // Helper functions for generating invokes.
1439 void InvokePrologue(const ParameterCount& expected,
1440 const ParameterCount& actual,
1441 Handle<Code> code_constant,
1444 bool* definitely_mismatches,
1446 const CallWrapper& call_wrapper);
1448 void InitializeNewString(Register string,
1450 Heap::RootListIndex map_index,
1454 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1455 void InNewSpace(Register object,
1457 Condition cond, // eq for new space, ne otherwise.
1460 // Helper for finding the mark bits for an address. Afterwards, the
1461 // bitmap register points at the word with the mark bits and the mask
1462 // the position of the first bit. Leaves addr_reg unchanged.
1463 inline void GetMarkBits(Register addr_reg,
1464 Register bitmap_reg,
1467 // Helper for throwing exceptions. Compute a handler address and jump to
1468 // it. See the implementation for register usage.
1469 void JumpToHandlerEntry();
1471 // Compute memory operands for safepoint stack slots.
1472 static int SafepointRegisterStackIndex(int reg_code);
1473 MemOperand SafepointRegisterSlot(Register reg);
1474 MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1476 // Loads the constant pool pointer (pp) register.
1477 void LoadConstantPoolPointerRegister();
1479 bool generating_stub_;
1481 // This handle will be patched with the code object on installation.
1482 Handle<Object> code_object_;
1484 // Needs access to SafepointRegisterStackIndex for compiled frame
1486 friend class StandardFrame;
1490 // The code patcher is used to patch (typically) small parts of code e.g. for
1491 // debugging and other types of instrumentation. When using the code patcher
1492 // the exact number of bytes specified must be emitted. It is not legal to emit
1493 // relocation information. If any of these constraints are violated it causes
1494 // an assertion to fail.
1502 CodePatcher(byte* address,
1504 FlushICache flush_cache = FLUSH);
1505 virtual ~CodePatcher();
1507 // Macro assembler to emit code.
1508 MacroAssembler* masm() { return &masm_; }
1510 // Emit an instruction directly.
1511 void Emit(Instr instr);
1513 // Emit an address directly.
1514 void Emit(Address addr);
1516 // Emit the condition part of an instruction leaving the rest of the current
1517 // instruction unchanged.
1518 void EmitCondition(Condition cond);
1521 byte* address_; // The address of the code being patched.
1522 int size_; // Number of bytes of the expected patch size.
1523 MacroAssembler masm_; // Macro assembler used to generate the code.
1524 FlushICache flush_cache_; // Whether to flush the I cache after patching.
1528 // -----------------------------------------------------------------------------
1529 // Static helper functions.
1531 inline MemOperand ContextOperand(Register context, int index) {
1532 return MemOperand(context, Context::SlotOffset(index));
1536 inline MemOperand GlobalObjectOperand() {
1537 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
1541 #ifdef GENERATED_CODE_COVERAGE
1542 #define CODE_COVERAGE_STRINGIFY(x) #x
1543 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1544 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1545 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
1547 #define ACCESS_MASM(masm) masm->
1551 } } // namespace v8::internal
1553 #endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_