1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
29 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
31 #include "assembler.h"
33 #include "v8globals.h"
38 // ----------------------------------------------------------------------------
39 // Static helper functions
41 // Generate a MemOperand for loading a field from an object.
42 inline MemOperand FieldMemOperand(Register object, int offset) {
// Subtract the heap-object tag so the operand addresses the untagged field.
43 return MemOperand(object, offset - kHeapObjectTag);
47 // Give alias names to registers
// Fixed register assignments for this ARM port. kRootRegister is loaded by
// MacroAssembler::InitializeRootRegister() below; pp/cp must stay in sync
// with the register allocator's reserved set (confirm against assembler-arm.h).
48 const Register pp = { kRegister_r7_Code }; // Constant pool pointer.
49 const Register cp = { kRegister_r8_Code }; // JavaScript context pointer.
50 const Register kRootRegister = { kRegister_r10_Code }; // Roots array pointer.
52 // Flags used for AllocateHeapNumber
// NOTE(review): the flags enum this comment introduces (presumably TaggingMode,
// used as a default argument by AllocateHeapNumber below) is not visible at
// this point in the listing — confirm against the full header.
// RememberedSetAction and SmiCheck are consumed as default arguments by the
// RecordWrite* helpers below; LinkRegisterStatus tells those helpers whether
// the caller has already saved lr.
61 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
62 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
63 enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
// Presumably returns some register distinct from every register passed in;
// unused slots default to no_reg. Confirm semantics in the .cc file.
66 Register GetRegisterThatIsNotOneOf(Register reg1,
67 Register reg2 = no_reg,
68 Register reg3 = no_reg,
69 Register reg4 = no_reg,
70 Register reg5 = no_reg,
71 Register reg6 = no_reg);
// Returns true if any two of the supplied registers alias each other.
// NOTE(review): the reg2 parameter line appears to be missing from this
// listing — confirm the full signature against the original header.
75 bool AreAliased(Register reg1,
77 Register reg3 = no_reg,
78 Register reg4 = no_reg,
79 Register reg5 = no_reg,
80 Register reg6 = no_reg);
// Consumed as a default argument by the Call overloads below; controls whether
// the call target address may be placed inline in the instruction stream.
// NOTE(review): the closing "};" of this enum is not visible in this listing.
84 enum TargetAddressStorageMode {
85 CAN_INLINE_TARGET_ADDRESS,
86 NEVER_INLINE_TARGET_ADDRESS
89 // MacroAssembler implements a collection of frequently used macros.
90 class MacroAssembler: public Assembler {
92 // The isolate parameter can be NULL if the macro assembler should
93 // not use isolate-dependent functionality. In this case, it's the
94 // responsibility of the caller to never invoke such function on the
// NOTE(review): the sentence above is truncated in this listing.
96 MacroAssembler(Isolate* isolate, void* buffer, int size);
98 // Jump, Call, and Ret pseudo instructions implementing inter-working.
99 void Jump(Register target, Condition cond = al);
100 void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
101 void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
102 static int CallSize(Register target, Condition cond = al);
103 void Call(Register target, Condition cond = al);
104 int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
105 static int CallSizeNotPredictableCodeSize(Address target,
106 RelocInfo::Mode rmode,
107 Condition cond = al);
108 void Call(Address target, RelocInfo::Mode rmode,
110 TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
111 int CallSize(Handle<Code> code,
112 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
113 TypeFeedbackId ast_id = TypeFeedbackId::None(),
114 Condition cond = al);
115 void Call(Handle<Code> code,
116 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
117 TypeFeedbackId ast_id = TypeFeedbackId::None(),
119 TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
120 void Ret(Condition cond = al);
122 // Emit code to discard a non-negative number of pointer-sized elements
123 // from the stack, clobbering only the sp register.
124 void Drop(int count, Condition cond = al);
// Presumably drops 'drop' elements (as in Drop above) and then returns.
126 void Ret(int drop, Condition cond = al);
128 // Swap two registers. If the scratch register is omitted then a slightly
129 // less efficient form using xor instead of mov is emitted.
130 void Swap(Register reg1,
132 Register scratch = no_reg,
133 Condition cond = al);
// Bitfield helpers: And, unsigned/signed bitfield extract, insert, clear,
// saturate. These mirror the corresponding ARM instructions.
136 void And(Register dst, Register src1, const Operand& src2,
137 Condition cond = al);
138 void Ubfx(Register dst, Register src, int lsb, int width,
139 Condition cond = al);
140 void Sbfx(Register dst, Register src, int lsb, int width,
141 Condition cond = al);
142 // The scratch register is not used for ARMv7.
143 // scratch can be the same register as src (in which case it is trashed), but
144 // not the same as dst.
145 void Bfi(Register dst,
150 Condition cond = al);
151 void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
152 void Usat(Register dst, int satpos, const Operand& src,
153 Condition cond = al);
155 void Call(Label* target);
156 void Push(Register src) { push(src); }
157 void Pop(Register dst) { pop(dst); }
159 // Register move. May do nothing if the registers are identical.
160 void Move(Register dst, Handle<Object> value);
161 void Move(Register dst, Register src, Condition cond = al);
162 void Move(DwVfpRegister dst, DwVfpRegister src);
164 // Load an object from the root table.
165 void LoadRoot(Register destination,
166 Heap::RootListIndex index,
167 Condition cond = al);
168 // Store an object to the root table.
169 void StoreRoot(Register source,
170 Heap::RootListIndex index,
171 Condition cond = al);
// GC support: incremental marking and write-barrier helpers.
173 // ---------------------------------------------------------------------------
176 void IncrementalMarkingRecordWriteHelper(Register object,
180 enum RememberedSetFinalAction {
185 // Record in the remembered set the fact that we have a pointer to new space
186 // at the address pointed to by the addr register. Only works if addr is not
188 void RememberedSetHelper(Register object, // Used for debug code.
191 SaveFPRegsMode save_fp,
192 RememberedSetFinalAction and_then);
194 void CheckPageFlag(Register object,
198 Label* condition_met);
200 void CheckMapDeprecated(Handle<Map> map,
202 Label* if_deprecated);
204 // Check if object is in new space. Jumps if the object is not in new space.
205 // The register scratch can be object itself, but scratch will be clobbered.
206 void JumpIfNotInNewSpace(Register object,
209 InNewSpace(object, scratch, ne, branch);
212 // Check if object is in new space. Jumps if the object is in new space.
213 // The register scratch can be object itself, but it will be clobbered.
214 void JumpIfInNewSpace(Register object,
217 InNewSpace(object, scratch, eq, branch);
220 // Check if an object has a given incremental marking color.
221 void HasColor(Register object,
228 void JumpIfBlack(Register object,
233 // Checks the color of an object. If the object is already grey or black
234 // then we just fall through, since it is already live. If it is white and
235 // we can determine that it doesn't need to be scanned, then we just mark it
236 // black and fall through. For the rest we jump to the label so the
237 // incremental marker can fix its assumptions.
238 void EnsureNotWhite(Register object,
242 Label* object_is_white_and_not_data);
244 // Detects conservatively whether an object is data-only, i.e. it does not
245 // need to be scanned by the garbage collector.
246 void JumpIfDataObject(Register value,
248 Label* not_data_object);
250 // Notify the garbage collector that we wrote a pointer into an object.
251 // |object| is the object being stored into, |value| is the object being
252 // stored. value and scratch registers are clobbered by the operation.
253 // The offset is the offset from the start of the object, not the offset from
254 // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
255 void RecordWriteField(
260 LinkRegisterStatus lr_status,
261 SaveFPRegsMode save_fp,
262 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
263 SmiCheck smi_check = INLINE_SMI_CHECK);
265 // As above, but the offset has the tag presubtracted. For use with
266 // MemOperand(reg, off).
267 inline void RecordWriteContextSlot(
272 LinkRegisterStatus lr_status,
273 SaveFPRegsMode save_fp,
274 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
275 SmiCheck smi_check = INLINE_SMI_CHECK) {
// Re-tag the offset and delegate to RecordWriteField.
276 RecordWriteField(context,
277 offset + kHeapObjectTag,
282 remembered_set_action,
286 // For a given |object| notify the garbage collector that the slot |address|
287 // has been written. |value| is the object being stored. The value and
288 // address registers are clobbered by the operation.
293 LinkRegisterStatus lr_status,
294 SaveFPRegsMode save_fp,
295 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
296 SmiCheck smi_check = INLINE_SMI_CHECK);
299 void Push(Handle<Object> handle);
300 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
302 // Push two registers. Pushes leftmost register first (to highest address).
// stm/ldm place the lowest-numbered register at the lowest address, so the
// single-instruction block-transfer form is usable only when the register
// numbers are already in the required order (checked via code() below);
// otherwise fall back to individual pre-decrement stores.
303 void Push(Register src1, Register src2, Condition cond = al) {
304 ASSERT(!src1.is(src2));
305 if (src1.code() > src2.code()) {
306 stm(db_w, sp, src1.bit() | src2.bit(), cond);
308 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
309 str(src2, MemOperand(sp, 4, NegPreIndex), cond);
313 // Push three registers. Pushes leftmost register first (to highest address).
314 void Push(Register src1, Register src2, Register src3, Condition cond = al) {
315 ASSERT(!src1.is(src2));
316 ASSERT(!src2.is(src3));
317 ASSERT(!src1.is(src3));
318 if (src1.code() > src2.code()) {
319 if (src2.code() > src3.code()) {
320 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
322 stm(db_w, sp, src1.bit() | src2.bit(), cond);
323 str(src3, MemOperand(sp, 4, NegPreIndex), cond);
326 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
327 Push(src2, src3, cond);
331 // Push four registers. Pushes leftmost register first (to highest address).
332 void Push(Register src1,
336 Condition cond = al) {
337 ASSERT(!src1.is(src2));
338 ASSERT(!src2.is(src3));
339 ASSERT(!src1.is(src3));
340 ASSERT(!src1.is(src4));
341 ASSERT(!src2.is(src4));
342 ASSERT(!src3.is(src4));
343 if (src1.code() > src2.code()) {
344 if (src2.code() > src3.code()) {
345 if (src3.code() > src4.code()) {
348 src1.bit() | src2.bit() | src3.bit() | src4.bit(),
351 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
352 str(src4, MemOperand(sp, 4, NegPreIndex), cond);
355 stm(db_w, sp, src1.bit() | src2.bit(), cond);
356 Push(src3, src4, cond);
359 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
360 Push(src2, src3, src4, cond);
364 // Pop two registers. Pops rightmost register first (from lower address).
365 void Pop(Register src1, Register src2, Condition cond = al) {
366 ASSERT(!src1.is(src2));
367 if (src1.code() > src2.code()) {
368 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
370 ldr(src2, MemOperand(sp, 4, PostIndex), cond);
371 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
375 // Pop three registers. Pops rightmost register first (from lower address).
376 void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
377 ASSERT(!src1.is(src2));
378 ASSERT(!src2.is(src3));
379 ASSERT(!src1.is(src3));
380 if (src1.code() > src2.code()) {
381 if (src2.code() > src3.code()) {
382 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
384 ldr(src3, MemOperand(sp, 4, PostIndex), cond);
385 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
388 Pop(src2, src3, cond);
389 str(src1, MemOperand(sp, 4, PostIndex), cond);
393 // Pop four registers. Pops rightmost register first (from lower address).
// Mirrors the four-register Push above: use a block ldm when register numbers
// are descending, otherwise recurse into smaller Pops / individual loads.
394 void Pop(Register src1,
398 Condition cond = al) {
399 ASSERT(!src1.is(src2));
400 ASSERT(!src2.is(src3));
401 ASSERT(!src1.is(src3));
402 ASSERT(!src1.is(src4));
403 ASSERT(!src2.is(src4));
404 ASSERT(!src3.is(src4));
405 if (src1.code() > src2.code()) {
406 if (src2.code() > src3.code()) {
407 if (src3.code() > src4.code()) {
410 src1.bit() | src2.bit() | src3.bit() | src4.bit(),
413 ldr(src4, MemOperand(sp, 4, PostIndex), cond);
414 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
417 Pop(src3, src4, cond);
418 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
421 Pop(src2, src3, src4, cond);
422 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
426 // Push and pop the registers that can hold pointers, as defined by the
427 // RegList constant kSafepointSavedRegisters.
428 void PushSafepointRegisters();
429 void PopSafepointRegisters();
430 void PushSafepointRegistersAndDoubles();
431 void PopSafepointRegistersAndDoubles();
432 // Store value in register src in the safepoint stack slot for
// NOTE(review): the sentence above is truncated in this listing.
434 void StoreToSafepointRegisterSlot(Register src, Register dst);
435 void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
436 // Load the value of the src register from its safepoint stack slot
437 // into register dst.
438 void LoadFromSafepointRegisterSlot(Register dst, Register src);
440 // Load two consecutive registers with two consecutive memory locations.
441 void Ldrd(Register dst1,
443 const MemOperand& src,
444 Condition cond = al);
446 // Store two consecutive registers to two consecutive memory locations.
447 void Strd(Register src1,
449 const MemOperand& dst,
450 Condition cond = al);
452 // Ensure that FPSCR contains values needed by JavaScript.
453 // We need the NaNModeControlBit to be sure that operations like
454 // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
455 // In VFP3 it will be always the Canonical NaN.
456 // In VFP2 it will be either the Canonical NaN or the negative version
457 // of the Canonical NaN. It doesn't matter if we have two values. The aim
458 // is to be sure to never generate the hole NaN.
459 void VFPEnsureFPSCRState(Register scratch);
461 // If the value is a NaN, canonicalize the value else, do nothing.
462 void VFPCanonicalizeNaN(const DwVfpRegister dst,
463 const DwVfpRegister src,
464 const Condition cond = al);
// In-place convenience overload: canonicalizes |value| into itself.
465 void VFPCanonicalizeNaN(const DwVfpRegister value,
466 const Condition cond = al) {
467 VFPCanonicalizeNaN(value, value, cond);
470 // Compare double values and move the result to the normal condition flags.
471 void VFPCompareAndSetFlags(const DwVfpRegister src1,
472 const DwVfpRegister src2,
473 const Condition cond = al);
474 void VFPCompareAndSetFlags(const DwVfpRegister src1,
476 const Condition cond = al);
478 // Compare double values and then load the fpscr flags to a register.
479 void VFPCompareAndLoadFlags(const DwVfpRegister src1,
480 const DwVfpRegister src2,
481 const Register fpscr_flags,
482 const Condition cond = al);
483 void VFPCompareAndLoadFlags(const DwVfpRegister src1,
485 const Register fpscr_flags,
486 const Condition cond = al);
488 void Vmov(const DwVfpRegister dst,
490 const Register scratch = no_reg);
// Move between core registers and the high/low 32-bit half of a VFP double.
492 void VmovHigh(Register dst, DwVfpRegister src);
493 void VmovHigh(DwVfpRegister dst, Register src);
494 void VmovLow(Register dst, DwVfpRegister src);
495 void VmovLow(DwVfpRegister dst, Register src);
497 // Loads the number from object into dst register.
498 // If |object| is neither smi nor heap number, |not_number| is jumped to
499 // with |object| still intact.
500 void LoadNumber(Register object,
501 LowDwVfpRegister dst,
502 Register heap_number_map,
506 // Loads the number from object into double_dst in the double format.
507 // Control will jump to not_int32 if the value cannot be exactly represented
508 // by a 32-bit integer.
509 // Floating point value in the 32-bit integer range that are not exact integer
511 void LoadNumberAsInt32Double(Register object,
512 DwVfpRegister double_dst,
513 Register heap_number_map,
515 LowDwVfpRegister double_scratch,
518 // Loads the number from object into dst as a 32-bit integer.
519 // Control will jump to not_int32 if the object cannot be exactly represented
520 // by a 32-bit integer.
521 // Floating point value in the 32-bit integer range that are not exact integer
522 // won't be converted.
523 void LoadNumberAsInt32(Register object,
525 Register heap_number_map,
527 DwVfpRegister double_scratch0,
528 LowDwVfpRegister double_scratch1,
531 // Generates function and stub prologue code.
532 void Prologue(PrologueFrameMode frame_mode);
535 // stack_space - extra stack space, used for alignment before call to C.
536 void EnterExitFrame(bool save_doubles, int stack_space = 0);
538 // Leave the current exit frame. Expects the return value in r0.
539 // Expect the number of values, pushed prior to the exit frame, to
540 // remove in a register (or no_reg, if there is nothing to remove).
541 void LeaveExitFrame(bool save_doubles,
542 Register argument_count,
543 bool restore_context);
545 // Get the actual activation frame alignment for target environment.
546 static int ActivationFrameAlignment();
548 void LoadContext(Register dst, int context_chain_length);
550 // Conditionally load the cached Array transitioned map of type
551 // transitioned_kind from the native context if the map in register
552 // map_in_out is the cached Array map in the native context of
554 void LoadTransitionedArrayMapConditional(
555 ElementsKind expected_kind,
556 ElementsKind transitioned_kind,
559 Label* no_map_match);
561 // Load the initial map for new Arrays from a JSFunction.
562 void LoadInitialArrayMap(Register function_in,
565 bool can_have_holes);
567 void LoadGlobalFunction(int index, Register function);
568 void LoadArrayFunction(Register function);
570 // Load the initial map from the global function. The registers
571 // function and map can be the same, function is then overwritten.
572 void LoadGlobalFunctionInitialMap(Register function,
// Point kRootRegister at the isolate's roots array so LoadRoot/StoreRoot work.
576 void InitializeRootRegister() {
577 ExternalReference roots_array_start =
578 ExternalReference::roots_array_start(isolate());
579 mov(kRootRegister, Operand(roots_array_start));
582 // ---------------------------------------------------------------------------
583 // JavaScript invokes
585 // Set up call kind marking in the dst register. (The original comment said
586 // "ecx" — an ia32 copy-paste; on ARM the register is the explicit
588 // dst parameter.)
588 void SetCallKind(Register dst, CallKind kind);
590 // Invoke the JavaScript function code by either calling or jumping.
591 void InvokeCode(Register code,
592 const ParameterCount& expected,
593 const ParameterCount& actual,
595 const CallWrapper& call_wrapper,
598 void InvokeCode(Handle<Code> code,
599 const ParameterCount& expected,
600 const ParameterCount& actual,
601 RelocInfo::Mode rmode,
605 // Invoke the JavaScript function in the given register. Changes the
606 // current context to the context in the function before invoking.
607 void InvokeFunction(Register function,
608 const ParameterCount& actual,
610 const CallWrapper& call_wrapper,
613 void InvokeFunction(Handle<JSFunction> function,
614 const ParameterCount& expected,
615 const ParameterCount& actual,
617 const CallWrapper& call_wrapper,
620 void IsObjectJSObjectType(Register heap_object,
625 void IsInstanceJSObjectType(Register map,
629 void IsObjectJSStringType(Register object,
633 void IsObjectNameType(Register object,
637 #ifdef ENABLE_DEBUGGER_SUPPORT
638 // ---------------------------------------------------------------------------
644 // ---------------------------------------------------------------------------
645 // Exception handling
647 // Push a new try handler and link into try handler chain.
648 void PushTryHandler(StackHandler::Kind kind, int handler_index);
650 // Unlink the stack handler on top of the stack from the try handler chain.
651 // Must preserve the result register.
652 void PopTryHandler();
654 // Passes thrown value to the handler of top of the try handler chain.
655 void Throw(Register value);
657 // Propagates an uncatchable exception to the top of the current JS stack's
659 void ThrowUncatchable(Register value);
661 // ---------------------------------------------------------------------------
662 // Inline caching support
664 // Generate code for checking access rights - used for security checks
665 // on access to global objects across environments. The holder register
666 // is left untouched, whereas both scratch registers are clobbered.
667 void CheckAccessGlobalProxy(Register holder_reg,
671 void GetNumberHash(Register t0, Register scratch);
673 void LoadFromNumberDictionary(Label* miss,
682 inline void MarkCode(NopMarkerTypes type) {
686 // Check if the given instruction is a 'type' marker.
687 // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type))
688 // These instructions are generated to mark special location in the code,
689 // like some special IC code.
690 static inline bool IsMarkedCode(Instr instr, int type) {
691 ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
692 return IsNop(instr, type);
// Extracts the marker <n> from a "mov rn, rn" marker instruction, or -1.
// NOTE(review): the declaration of src_mask (used below) is missing from this
// listing — confirm against the original header.
696 static inline int GetCodeMarker(Instr instr) {
697 int dst_reg_offset = 12;
698 int dst_mask = 0xf << dst_reg_offset;
700 int dst_reg = (instr & dst_mask) >> dst_reg_offset;
701 int src_reg = instr & src_mask;
702 uint32_t non_register_mask = ~(dst_mask | src_mask);
703 uint32_t mov_mask = al | 13 << 21;
705 // Return <n> if we have a mov rn rn, else return -1.
706 int type = ((instr & non_register_mask) == mov_mask) &&
707 (dst_reg == src_reg) &&
708 (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
711 ASSERT((type == -1) ||
712 ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
717 // ---------------------------------------------------------------------------
718 // Allocation support
720 // Allocate an object in new space or old pointer space. The object_size is
721 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
722 // is passed. If the space is exhausted control continues at the gc_required
723 // label. The allocated object is returned in result. If the flag
724 // tag_allocated_object is true the result is tagged as a heap object.
725 // All registers are clobbered also when control continues at the gc_required
727 void Allocate(int object_size,
732 AllocationFlags flags);
734 void Allocate(Register object_size,
739 AllocationFlags flags);
741 // Undo allocation in new space. The object passed and objects allocated after
742 // it will no longer be allocated. The caller must make sure that no pointers
743 // are left to the object(s) no longer allocated as they would be invalid when
744 // allocation is undone.
745 void UndoAllocationInNewSpace(Register object, Register scratch);
// String allocation helpers (two-byte/ascii, sequential/cons/sliced).
748 void AllocateTwoByteString(Register result,
754 void AllocateAsciiString(Register result,
760 void AllocateTwoByteConsString(Register result,
765 void AllocateAsciiConsString(Register result,
770 void AllocateTwoByteSlicedString(Register result,
775 void AllocateAsciiSlicedString(Register result,
781 // Allocates a heap number or jumps to the gc_required label if the young
782 // space is full and a scavenge is needed. All registers are clobbered also
783 // when control continues at the gc_required label.
784 void AllocateHeapNumber(Register result,
787 Register heap_number_map,
789 TaggingMode tagging_mode = TAG_RESULT);
790 void AllocateHeapNumberWithValue(Register result,
794 Register heap_number_map,
797 // Copies a fixed number of fields of heap objects from src to dst.
798 void CopyFields(Register dst,
800 LowDwVfpRegister double_scratch,
803 // Copies a number of bytes from src to dst. All registers are clobbered. On
804 // exit src and dst will point to the place just after where the last byte was
805 // read or written and length will be zero.
806 void CopyBytes(Register src,
811 // Initialize fields with filler values. Fields starting at |start_offset|
812 // not including end_offset are overwritten with the value in |filler|. At
813 // the end the loop, |start_offset| takes the value of |end_offset|.
814 void InitializeFieldsWithFiller(Register start_offset,
818 // ---------------------------------------------------------------------------
819 // Support functions.
821 // Try to get function prototype of a function and puts the value in
822 // the result register. Checks that the function really is a
823 // function and jumps to the miss label if the fast checks fail. The
824 // function register will be untouched; the other registers may be
826 void TryGetFunctionPrototype(Register function,
830 bool miss_on_bound_function = false);
832 // Compare object type for heap object. heap_object contains a non-Smi
833 // whose object type should be compared with the given type. This both
834 // sets the flags and leaves the object type in the type_reg register.
835 // It leaves the map in the map register (unless the type_reg and map register
836 // are the same register). It leaves the heap object in the heap_object
837 // register unless the heap_object register is the same register as one of the
839 void CompareObjectType(Register heap_object,
844 // Compare instance type in a map. map contains a valid map object whose
845 // object type should be compared with the given type. This both
846 // sets the flags and leaves the object type in the type_reg register.
847 void CompareInstanceType(Register map,
852 // Check if a map for a JSObject indicates that the object has fast elements.
853 // Jump to the specified label if it does not.
854 void CheckFastElements(Register map,
858 // Check if a map for a JSObject indicates that the object can have both smi
859 // and HeapObject elements. Jump to the specified label if it does not.
860 void CheckFastObjectElements(Register map,
864 // Check if a map for a JSObject indicates that the object has fast smi only
865 // elements. Jump to the specified label if it does not.
866 void CheckFastSmiElements(Register map,
870 // Check to see if maybe_number can be stored as a double in
871 // FastDoubleElements. If it can, store it at the index specified by key in
872 // the FastDoubleElements array elements. Otherwise jump to fail.
873 void StoreNumberToDoubleElements(Register value_reg,
875 Register elements_reg,
877 LowDwVfpRegister double_scratch,
879 int elements_offset = 0);
881 // Compare an object's map with the specified map and its transitioned
882 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
883 // set with result of map compare. If multiple map compares are required, the
884 // compare sequences branches to early_success.
885 void CompareMap(Register obj,
888 Label* early_success);
890 // As above, but the map of the object is already loaded into the register
891 // which is preserved by the code generated.
892 void CompareMap(Register obj_map,
894 Label* early_success);
896 // Check if the map of an object is equal to a specified map and branch to
897 // label if not. Skip the smi check if not required (object is known to be a
898 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
899 // against maps that are ElementsKind transition maps of the specified map.
900 void CheckMap(Register obj,
904 SmiCheckType smi_check_type);
907 void CheckMap(Register obj,
909 Heap::RootListIndex index,
911 SmiCheckType smi_check_type);
914 // Check if the map of an object is equal to a specified map and branch to a
915 // specified target if equal. Skip the smi check if not required (object is
916 // known to be a heap object)
917 void DispatchMap(Register obj,
920 Handle<Code> success,
921 SmiCheckType smi_check_type);
924 // Compare the object in a register to a value from the root list.
925 // Uses the ip register as scratch.
926 void CompareRoot(Register obj, Heap::RootListIndex index);
929 // Load and check the instance type of an object for being a string.
930 // Loads the type into the second argument register.
931 // Returns a condition that will be enabled if the object was a string
932 // and the passed-in condition passed. If the passed-in condition failed
933 // then flags remain unchanged.
934 Condition IsObjectStringType(Register obj,
936 Condition cond = al) {
// Load the map, then the instance type, then test the not-a-string bit;
// string types have a zero tag, asserted below.
937 ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
938 ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
939 tst(type, Operand(kIsNotStringMask), cond);
940 ASSERT_EQ(0, kStringTag);
945 // Generates code for reporting that an illegal operation has
947 void IllegalOperation(int num_arguments);
949 // Picks out an array index from the hash field.
951 // hash - holds the index's hash. Clobbered.
952 // index - holds the overwritten index on exit.
953 void IndexFromHash(Register hash, Register index);
955 // Get the number of least significant bits from a register
956 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
957 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
959 // Load the value of a smi object into a double register.
960 // The register value must be between d0 and d15.
961 void SmiToDouble(LowDwVfpRegister value, Register smi);
963 // Check if a double can be exactly represented as a signed 32-bit integer.
964 // Z flag set to one if true.
965 void TestDoubleIsInt32(DwVfpRegister double_input,
966 LowDwVfpRegister double_scratch);
968 // Try to convert a double to a signed 32-bit integer.
969 // Z flag set to one and result assigned if the conversion is exact.
970 void TryDoubleToInt32Exact(Register result,
971 DwVfpRegister double_input,
972 LowDwVfpRegister double_scratch);
974 // Floor a double and writes the value to the result register.
975 // Go to exact if the conversion is exact (to be able to test -0),
976 // fall through calling code if an overflow occurred, else go to done.
977 // In return, input_high is loaded with high bits of input.
978 void TryInt32Floor(Register result,
979 DwVfpRegister double_input,
981 LowDwVfpRegister double_scratch,
985 // Performs a truncating conversion of a floating point number as used by
986 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
987 // succeeds, otherwise falls through if result is saturated. On return
988 // 'result' either holds answer, or is clobbered on fall through.
990 // Only public for the test code in test-code-stubs-arm.cc.
991 void TryInlineTruncateDoubleToI(Register result,
995 // Performs a truncating conversion of a floating point number as used by
996 // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
997 // Exits with 'result' holding the answer.
998 void TruncateDoubleToI(Register result, DwVfpRegister double_input);
1000 // Performs a truncating conversion of a heap number as used by
1001 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
1002 // must be different registers. Exits with 'result' holding the answer.
1003 void TruncateHeapNumberToI(Register result, Register object);
1005 // Converts the smi or heap number in object to an int32 using the rules
1006 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
1007 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
1008 // different registers.
1009 void TruncateNumberToI(Register object,
1011 Register heap_number_map,
// (remaining TruncateNumberToI parameters elided in this excerpt)
1015 // Check whether d16-d31 are available on the CPU. The result is given by the
1016 // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
1017 void CheckFor32DRegs(Register scratch);
1019 // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
1020 // values to location, saving [d0..(d15|d31)].
1021 void SaveFPRegs(Register location, Register scratch);
1023 // Does a runtime check for 16/32 FP registers. Either way, pops 32 double
1024 // values to location, restoring [d0..(d15|d31)].
1025 void RestoreFPRegs(Register location, Register scratch);
1027 // ---------------------------------------------------------------------------
// Runtime calls.
1030 // Call a code stub.
1031 void CallStub(CodeStub* stub,
1032 TypeFeedbackId ast_id = TypeFeedbackId::None(),
1033 Condition cond = al);
1035 // Call a code stub.
1036 void TailCallStub(CodeStub* stub, Condition cond = al);
1038 // Call a runtime routine.
1039 void CallRuntime(const Runtime::Function* f,
1041 SaveFPRegsMode save_doubles = kDontSaveFPRegs);
// Convenience wrapper: resolves the function id and calls it with
// kSaveFPRegs so double registers survive the runtime call.
1042 void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
1043 const Runtime::Function* function = Runtime::FunctionForId(id);
1044 CallRuntime(function, function->nargs, kSaveFPRegs);
1047 // Convenience function: Same as above, but takes the fid instead.
1048 void CallRuntime(Runtime::FunctionId id, int num_arguments) {
1049 CallRuntime(Runtime::FunctionForId(id), num_arguments);
1052 // Convenience function: call an external reference.
1053 void CallExternalReference(const ExternalReference& ext,
1056 // Tail call of a runtime routine (jump).
1057 // Like JumpToExternalReference, but also takes care of passing the number
1059 void TailCallExternalReference(const ExternalReference& ext,
1063 // Convenience function: tail call a runtime routine (jump).
1064 void TailCallRuntime(Runtime::FunctionId fid,
// Returns the number of argument words that must be passed on the stack
// (as opposed to in registers) for the given argument counts.
1068 int CalculateStackPassedWords(int num_reg_arguments,
1069 int num_double_arguments);
1071 // Before calling a C-function from generated code, align arguments on stack.
1072 // After aligning the frame, non-register arguments must be stored in
1073 // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
1074 // are word sized. If double arguments are used, this function assumes that
1075 // all double arguments are stored before core registers; otherwise the
1076 // correct alignment of the double values is not guaranteed.
1077 // Some compilers/platforms require the stack to be aligned when calling
1079 // Needs a scratch register to do some arithmetic. This register will be
1081 void PrepareCallCFunction(int num_reg_arguments,
1082 int num_double_registers,
1084 void PrepareCallCFunction(int num_reg_arguments,
1087 // There are two ways of passing double arguments on ARM, depending on
1088 // whether soft or hard floating point ABI is used. These functions
1089 // abstract parameter passing for the three different ways we call
1090 // C functions from generated code.
1091 void SetCallCDoubleArguments(DwVfpRegister dreg);
1092 void SetCallCDoubleArguments(DwVfpRegister dreg1, DwVfpRegister dreg2);
1093 void SetCallCDoubleArguments(DwVfpRegister dreg, Register reg);
1095 // Calls a C function and cleans up the space for arguments allocated
1096 // by PrepareCallCFunction. The called function is not allowed to trigger a
1097 // garbage collection, since that might move the code and invalidate the
1098 // return address (unless this is somehow accounted for by the called
1100 void CallCFunction(ExternalReference function, int num_arguments);
1101 void CallCFunction(Register function, int num_arguments);
1102 void CallCFunction(ExternalReference function,
1103 int num_reg_arguments,
1104 int num_double_arguments);
1105 void CallCFunction(Register function,
1106 int num_reg_arguments,
1107 int num_double_arguments);
// Moves the C ABI double return value into dst (location depends on
// whether the soft- or hard-float ABI is in use; see use_eabi_hardfloat).
1109 void GetCFunctionDoubleResult(const DwVfpRegister dst);
1111 // Calls an API function. Allocates HandleScope, extracts returned value
1112 // from handle and propagates exceptions. Restores context. stack_space
1113 // - space to be unwound on exit (includes the call JS arguments space and
1114 // the additional space allocated for the fast call).
1115 void CallApiFunctionAndReturn(ExternalReference function,
1116 Address function_address,
1117 ExternalReference thunk_ref,
1118 Register thunk_last_arg,
1120 MemOperand return_value_operand,
1121 MemOperand* context_restore_operand);
1123 // Jump to a runtime routine.
1124 void JumpToExternalReference(const ExternalReference& builtin);
1126 // Invoke specified builtin JavaScript function. Adds an entry to
1127 // the unresolved list if the name does not resolve.
1128 void InvokeBuiltin(Builtins::JavaScript id,
1130 const CallWrapper& call_wrapper = NullCallWrapper());
1132 // Store the code object for the given builtin in the target register and
1133 // setup the function in r1.
1134 void GetBuiltinEntry(Register target, Builtins::JavaScript id);
1136 // Store the function for the given builtin in the target register.
1137 void GetBuiltinFunction(Register target, Builtins::JavaScript id);
// Returns the code object associated with this MacroAssembler; asserts
// that it has already been set (patched on installation, see code_object_).
1139 Handle<Object> CodeObject() {
1140 ASSERT(!code_object_.is_null());
1141 return code_object_;
1145 // ---------------------------------------------------------------------------
1146 // StatsCounter support
1148 void SetCounter(StatsCounter* counter, int value,
1149 Register scratch1, Register scratch2);
1150 void IncrementCounter(StatsCounter* counter, int value,
1151 Register scratch1, Register scratch2);
1152 void DecrementCounter(StatsCounter* counter, int value,
1153 Register scratch1, Register scratch2);
1156 // ---------------------------------------------------------------------------
// Debugging.
1159 // Calls Abort(msg) if the condition cond is not satisfied.
1160 // Use --debug_code to enable.
1161 void Assert(Condition cond, BailoutReason reason);
1162 void AssertFastElements(Register elements);
1164 // Like Assert(), but always enabled.
1165 void Check(Condition cond, BailoutReason reason);
1167 // Print a message to stdout and abort execution.
1168 void Abort(BailoutReason msg);
1170 // Verify restrictions about code generated in stubs.
1171 void set_generating_stub(bool value) { generating_stub_ = value; }
1172 bool generating_stub() { return generating_stub_; }
1173 void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
1174 bool allow_stub_calls() { return allow_stub_calls_; }
1175 void set_has_frame(bool value) { has_frame_ = value; }
1176 bool has_frame() { return has_frame_; }
1177 inline bool AllowThisStubCall(CodeStub* stub);
1179 // EABI variant for double arguments in use.
1180 bool use_eabi_hardfloat() {
// NOTE(review): the preprocessor conditional opening this #if/#elif chain
// (original line ~1181) is missing from this excerpt.
1182 return OS::ArmUsingHardFloat();
1183 #elif USE_EABI_HARDFLOAT
1190 // ---------------------------------------------------------------------------
// Number utilities.
1193 // Check whether the value of reg is a power of two and not zero. If not
1194 // control continues at the label not_power_of_two. If reg is a power of two
1195 // the register scratch contains the value of (reg - 1) when control falls
1197 void JumpIfNotPowerOfTwoOrZero(Register reg,
1199 Label* not_power_of_two_or_zero);
1200 // Check whether the value of reg is a power of two and not zero.
1201 // Control falls through if it is, with scratch containing the mask
1203 // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
1204 // zero or negative, or jumps to the 'not_power_of_two' label if the value is
1205 // strictly positive but not a power of two.
1206 void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
1208 Label* zero_and_neg,
1209 Label* not_power_of_two);
1211 // ---------------------------------------------------------------------------
// Smi utilities.
// Tag an int32 as a smi: reg + reg doubles the value, i.e. a logical
// shift left by one, leaving the (zero) smi tag in the low bit.
1214 void SmiTag(Register reg, SBit s = LeaveCC) {
1215 add(reg, reg, Operand(reg), s);
1217 void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
1218 add(dst, src, Operand(src), s);
1221 // Try to convert int32 to smi. If the value is too large, preserve
1222 // the original value and jump to not_a_smi. Destroys scratch and
1224 void TrySmiTag(Register reg, Label* not_a_smi) {
1225 TrySmiTag(reg, reg, not_a_smi);
// Tags into ip with SetCC: the shift out of the top bit sets the flags
// used (in the elided continuation) to detect overflow.
1227 void TrySmiTag(Register reg, Register src, Label* not_a_smi) {
1228 SmiTag(ip, src, SetCC);
1234 void SmiUntag(Register reg, SBit s = LeaveCC) {
1235 mov(reg, Operand::SmiUntag(reg), s);
1237 void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
1238 mov(dst, Operand::SmiUntag(src), s);
1241 // Untag the source value into destination and jump if source is a smi.
// Source and destination can be the same register.
1243 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1245 // Untag the source value into destination and jump if source is not a smi.
// Source and destination can be the same register.
1247 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
// Test if the register contains a smi (Z == 1, i.e. eq, if true):
// tst yields zero -- setting Z -- exactly when the tag bits are clear.
1250 inline void SmiTst(Register value) {
1251 tst(value, Operand(kSmiTagMask));
// As above, but also requires the sign bit to be clear (eq only for a
// non-negative smi).
1253 inline void NonNegativeSmiTst(Register value) {
1254 tst(value, Operand(kSmiTagMask | kSmiSignMask));
1256 // Jump if the register contains a smi.
1257 inline void JumpIfSmi(Register value, Label* smi_label) {
1258 tst(value, Operand(kSmiTagMask));
// Jump if the register contains a non-smi.
1262 inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
1263 tst(value, Operand(kSmiTagMask));
1264 b(ne, not_smi_label);
1266 // Jump if either of the registers contain a non-smi.
1267 void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1268 // Jump if either of the registers contain a smi.
1269 void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1271 // Abort execution if argument is a smi, enabled via --debug-code.
1272 void AssertNotSmi(Register object);
1273 void AssertSmi(Register object);
1275 // Abort execution if argument is not a string, enabled via --debug-code.
1276 void AssertString(Register object);
1278 // Abort execution if argument is not a name, enabled via --debug-code.
1279 void AssertName(Register object);
1281 // Abort execution if reg is not the root value with the given index,
1282 // enabled via --debug-code.
1283 void AssertIsRoot(Register reg, Heap::RootListIndex index);
1285 // ---------------------------------------------------------------------------
1286 // HeapNumber utilities
1288 void JumpIfNotHeapNumber(Register object,
1289 Register heap_number_map,
1291 Label* on_not_heap_number);
1293 // ---------------------------------------------------------------------------
// String utilities.
1296 // Generate code to do a lookup in the number string cache. If the number in
1297 // the register object is found in the cache the generated code falls through
1298 // with the result in the result register. The object and the result register
1299 // can be the same. If the number is not found in the cache the code jumps to
1300 // the label not_found with only the content of register object unchanged.
1301 void LookupNumberStringCache(Register object,
// (remaining LookupNumberStringCache parameters elided in this excerpt)
1308 // Checks if both objects are sequential ASCII strings and jumps to label
1309 // if either is not. Assumes that neither object is a smi.
1310 void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
1316 // Checks if both objects are sequential ASCII strings and jumps to label
1317 // if either is not.
1318 void JumpIfNotBothSequentialAsciiStrings(Register first,
1322 Label* not_flat_ascii_strings);
1324 // Checks if both instance types are sequential ASCII strings and jumps to
1325 // label if either is not.
1326 void JumpIfBothInstanceTypesAreNotSequentialAscii(
1327 Register first_object_instance_type,
1328 Register second_object_instance_type,
1333 // Check if instance type is sequential ASCII string and jump to label if
1335 void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
1339 void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
1341 // ---------------------------------------------------------------------------
1342 // Patching helpers.
1344 // Get the location of a relocated constant (its address in the constant pool)
1345 // from its load site.
1346 void GetRelocatedValueLocation(Register ldr_location,
// Clamps input to the range [0, 255] into output (used for typed arrays).
1350 void ClampUint8(Register output_reg, Register input_reg);
1352 void ClampDoubleToUint8(Register result_reg,
1353 DwVfpRegister input_reg,
1354 LowDwVfpRegister double_scratch);
1357 void LoadInstanceDescriptors(Register map, Register descriptors);
1358 void EnumLength(Register dst, Register map);
1359 void NumberOfOwnDescriptors(Register dst, Register map);
// Extracts a BitField-encoded value in place: shift the field down, then
// mask it (the mask is pre-shifted left by kSmiTagSize for smi encoding).
1361 template<typename Field>
1362 void DecodeField(Register reg) {
1363 static const int shift = Field::kShift;
1364 static const int mask = (Field::kMask >> shift) << kSmiTagSize;
1365 mov(reg, Operand(reg, LSR, shift));
1366 and_(reg, reg, Operand(mask));
1369 // Activation support.
1370 void EnterFrame(StackFrame::Type type);
1371 void LeaveFrame(StackFrame::Type type);
1373 // Expects object in r0 and returns map with validated enum cache
1374 // in r0. Assumes that any other register can be used as a scratch.
1375 void CheckEnumCache(Register null_value, Label* call_runtime);
1377 // AllocationMemento support. Arrays may have an associated
1378 // AllocationMemento object that can be checked for in order to pretransition
1380 // On entry, receiver_reg should point to the array object.
1381 // scratch_reg gets clobbered.
1382 // If allocation info is present, condition flags are set to eq.
1383 void TestJSArrayForAllocationMemento(Register receiver_reg,
1384 Register scratch_reg,
1385 Label* no_memento_found);
// Convenience wrapper: branches to memento_found when the test above
// sets eq, otherwise lands on the local no_memento_found label.
1387 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
1388 Register scratch_reg,
1389 Label* memento_found) {
1390 Label no_memento_found;
1391 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
1393 b(eq, memento_found);
1394 bind(&no_memento_found);
// --- Private helpers and state (the 'private:' access specifier for this
// section falls in a gap of the excerpt; see the original header). ---
1398 void CallCFunctionHelper(Register function,
1399 int num_reg_arguments,
1400 int num_double_arguments);
1402 void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
1404 // Helper functions for generating invokes.
1405 void InvokePrologue(const ParameterCount& expected,
1406 const ParameterCount& actual,
1407 Handle<Code> code_constant,
1410 bool* definitely_mismatches,
1412 const CallWrapper& call_wrapper,
1413 CallKind call_kind);
1415 void InitializeNewString(Register string,
1417 Heap::RootListIndex map_index,
1421 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1422 void InNewSpace(Register object,
1424 Condition cond, // eq for new space, ne otherwise.
1427 // Helper for finding the mark bits for an address. Afterwards, the
1428 // bitmap register points at the word with the mark bits and the mask
1429 // the position of the first bit. Leaves addr_reg unchanged.
1430 inline void GetMarkBits(Register addr_reg,
1431 Register bitmap_reg,
1434 // Helper for throwing exceptions. Compute a handler address and jump to
1435 // it. See the implementation for register usage.
1436 void JumpToHandlerEntry();
1438 // Compute memory operands for safepoint stack slots.
1439 static int SafepointRegisterStackIndex(int reg_code);
1440 MemOperand SafepointRegisterSlot(Register reg);
1441 MemOperand SafepointRegistersAndDoublesSlot(Register reg);
// Flags queried by the generating_stub()/allow_stub_calls() accessors.
1443 bool generating_stub_;
1444 bool allow_stub_calls_;
1446 // This handle will be patched with the code object on installation.
1447 Handle<Object> code_object_;
1449 // Needs access to SafepointRegisterStackIndex for compiled frame
1451 friend class StandardFrame;
1455 // The code patcher is used to patch (typically) small parts of code e.g. for
1456 // debugging and other types of instrumentation. When using the code patcher
1457 // the exact number of bytes specified must be emitted. It is not legal to emit
1458 // relocation information. If any of these constraints are violated it causes
1459 // an assertion to fail.
// NOTE(review): the 'class CodePatcher {' head and the FlushICache enum
// (original lines ~1460-1466) are missing from this excerpt; the lines
// below are the class body starting at its constructor.
1467 CodePatcher(byte* address,
1469 FlushICache flush_cache = FLUSH);
1470 virtual ~CodePatcher();
1472 // Macro assembler to emit code.
1473 MacroAssembler* masm() { return &masm_; }
1475 // Emit an instruction directly.
1476 void Emit(Instr instr);
1478 // Emit an address directly.
1479 void Emit(Address addr);
1481 // Emit the condition part of an instruction leaving the rest of the current
1482 // instruction unchanged.
1483 void EmitCondition(Condition cond);
1486 byte* address_; // The address of the code being patched.
1487 int size_; // Number of bytes of the expected patch size.
1488 MacroAssembler masm_; // Macro assembler used to generate the code.
1489 FlushICache flush_cache_; // Whether to flush the I cache after patching.
1493 // -----------------------------------------------------------------------------
1494 // Static helper functions.
// Builds a MemOperand for the slot at the given index in a context.
1496 inline MemOperand ContextOperand(Register context, int index) {
1497 return MemOperand(context, Context::SlotOffset(index));
// Builds a MemOperand for the global object slot of the current context (cp).
1501 inline MemOperand GlobalObjectOperand() {
1502 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
// With coverage enabled, every ACCESS_MASM site first emits a stop marker
// carrying file:line so executed code paths can be traced.
1506 #ifdef GENERATED_CODE_COVERAGE
1507 #define CODE_COVERAGE_STRINGIFY(x) #x
1508 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1509 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1510 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
1512 #define ACCESS_MASM(masm) masm->
1516 } } // namespace v8::internal
1518 #endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_