// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
#define V8_X64_MACRO_ASSEMBLER_X64_H_

#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/base/flags.h"
#include "src/frames.h"
#include "src/globals.h"

namespace v8 {
namespace internal {
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee saved, and is not used by the
// function calling convention.
const Register kScratchRegister = { 10 };  // r10.
const Register kRootRegister = { 13 };     // r13 (callee saved).
// Actual value of root register is offset from the root array's start
// to take advantage of negative 8-bit displacement values.
const int kRootRegisterBias = 128;
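// For example (a sketch of how the bias pays off): a root-list load at
// index i can be encoded as
//   Operand(kRootRegister, (i << kPointerSizeLog2) - kRootRegisterBias)
// so indices near the start of the root array fit in a signed 8-bit
// displacement.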
// Convenience for platform-independent signatures.
typedef Operand MemOperand;

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};

enum class SmiOperationConstraint {
  kPreserveSourceRegister = 1 << 0,
  kBailoutOnNoOverflow = 1 << 1,
  kBailoutOnOverflow = 1 << 2
};

typedef base::Flags<SmiOperationConstraint> SmiOperationConstraints;

DEFINE_OPERATORS_FOR_FLAGS(SmiOperationConstraints)
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3 = no_reg,
                Register reg4 = no_reg,
                Register reg5 = no_reg,
                Register reg6 = no_reg,
                Register reg7 = no_reg,
                Register reg8 = no_reg);
// Forward declaration.
class JumpTarget;

struct SmiIndex {
  SmiIndex(Register index_register, ScaleFactor scale)
      : reg(index_register),
        scale(scale) {}
  Register reg;
  ScaleFactor scale;
};
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  // The isolate parameter can be NULL if the macro assembler should
  // not use isolate-dependent functionality. In this case, it's the
  // responsibility of the caller to never invoke such function on the
  // macro assembler.
  MacroAssembler(Isolate* isolate, void* buffer, int size);

  // Prevent the use of the RootArray during the lifetime of this
  // scope object.
  class NoRootArrayScope BASE_EMBEDDED {
   public:
    explicit NoRootArrayScope(MacroAssembler* assembler)
        : variable_(&assembler->root_array_available_),
          old_value_(assembler->root_array_available_) {
      assembler->root_array_available_ = false;
    }
    ~NoRootArrayScope() {
      *variable_ = old_value_;
    }

   private:
    bool* variable_;
    bool old_value_;
  };
  // Operand pointing to an external reference.
  // May emit code to set up the scratch register. The operand is
  // only guaranteed to be correct as long as the scratch register
  // isn't changed.
  // If the operand is used more than once, use a scratch register
  // that is guaranteed not to be clobbered.
  Operand ExternalOperand(ExternalReference reference,
                          Register scratch = kScratchRegister);
  // Loads and stores the value of an external reference.
  // Special case code for load and store to take advantage of
  // load_rax/store_rax if possible/necessary.
  // For other operations, just use:
  //   Operand operand = ExternalOperand(extref);
  //   operation(operand, ..);
  void Load(Register destination, ExternalReference source);
  void Store(ExternalReference destination, Register source);
  // Loads the address of the external reference into the destination
  // register.
  void LoadAddress(Register destination, ExternalReference source);
  // Returns the size of the code generated by LoadAddress.
  // Used by CallSize(ExternalReference) to find the size of a call.
  int LoadAddressSize(ExternalReference source);
  // Pushes the address of the external reference onto the stack.
  void PushAddress(ExternalReference source);

  // Operations on roots in the root-array.
  void LoadRoot(Register destination, Heap::RootListIndex index);
  void StoreRoot(Register source, Heap::RootListIndex index);
  // Load a root value where the index (or part of it) is variable.
  // The variable_offset register is added to the fixed_offset value
  // to get the index into the root-array.
  void LoadRootIndexed(Register destination,
                       Register variable_offset,
                       int fixed_offset);
  void CompareRoot(Register with, Heap::RootListIndex index);
  void CompareRoot(const Operand& with, Heap::RootListIndex index);
  void PushRoot(Heap::RootListIndex index);
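  // Example (a sketch): load a root and compare another register against it.
  //   LoadRoot(rax, Heap::kUndefinedValueRootIndex);
  //   CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
  //   j(equal, &is_undefined);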
  // These functions do not arrange the registers in any particular order so
  // they are not useful for calls that can cause a GC. The caller can
  // exclude up to 3 registers that do not need to be saved and restored.
  void PushCallerSaved(SaveFPRegsMode fp_mode,
                       Register exclusion1 = no_reg,
                       Register exclusion2 = no_reg,
                       Register exclusion3 = no_reg);
  void PopCallerSaved(SaveFPRegsMode fp_mode,
                      Register exclusion1 = no_reg,
                      Register exclusion2 = no_reg,
                      Register exclusion3 = no_reg);
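  // Example (a sketch): preserve caller-saved registers around code that
  // clobbers them, excluding rax because its value is not needed afterwards:
  //   PushCallerSaved(kSaveFPRegs, rax);
  //   ...  // code that may clobber caller-saved registers
  //   PopCallerSaved(kSaveFPRegs, rax);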
  // ---------------------------------------------------------------------------
  // GC Support

  enum RememberedSetFinalAction {
    kReturnAtEnd,
    kFallThroughAtEnd
  };

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register. Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr,
                           Register scratch,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  void CheckPageFlag(Register object,
                     Register scratch,
                     int mask,
                     Condition cc,
                     Label* condition_met,
                     Label::Distance condition_met_distance = Label::kFar);

  // Check if object is in new space. Jumps if the object is not in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  void JumpIfNotInNewSpace(Register object,
                           Register scratch,
                           Label* branch,
                           Label::Distance distance = Label::kFar) {
    InNewSpace(object, scratch, not_equal, branch, distance);
  }

  // Check if object is in new space. Jumps if the object is in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfInNewSpace(Register object,
                        Register scratch,
                        Label* branch,
                        Label::Distance distance = Label::kFar) {
    InNewSpace(object, scratch, equal, branch, distance);
  }

  // Check if an object has the black incremental marking color. Also uses rcx!
  void JumpIfBlack(Register object,
                   Register scratch0,
                   Register scratch1,
                   Label* on_black,
                   Label::Distance on_black_distance = Label::kFar);

  // Detects conservatively whether an object is data-only, i.e. it does not
  // need to be scanned by the garbage collector.
  void JumpIfDataObject(Register value,
                        Register scratch,
                        Label* not_data_object,
                        Label::Distance not_data_object_distance);

  // Checks the color of an object. If the object is already grey or black
  // then we just fall through, since it is already live. If it is white and
  // we can determine that it doesn't need to be scanned, then we just mark it
  // black and fall through. For the rest we jump to the label so the
  // incremental marker can fix its assumptions.
  void EnsureNotWhite(Register object,
                      Register bitmap_scratch,
                      Register mask_scratch,
                      Label* object_is_white_and_not_data,
                      Label::Distance distance);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
  void RecordWriteField(
      Register object,
      int offset,
      Register value,
      Register scratch,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);
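  // Example (a sketch): store a pointer into an object's field and record
  // the write for the GC:
  //   movp(FieldOperand(rbx, JSObject::kPropertiesOffset), rax);
  //   RecordWriteField(rbx, JSObject::kPropertiesOffset, rax, rcx,
  //                    kDontSaveFPRegs);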
  // As above, but the offset has the tag presubtracted. For use with
  // Operand(reg, off).
  void RecordWriteContextSlot(
      Register context,
      int offset,
      Register value,
      Register scratch,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting) {
    RecordWriteField(context,
                     offset + kHeapObjectTag,
                     value,
                     scratch,
                     save_fp,
                     remembered_set_action,
                     smi_check,
                     pointers_to_here_check_for_value);
  }

  // Notify the garbage collector that we wrote a pointer into a fixed array.
  // |array| is the array being stored into, |value| is the
  // object being stored. |index| is the array index represented as a non-smi.
  // All registers are clobbered by the operation. RecordWriteArray
  // filters out smis so it does not update the write barrier if the
  // value is a smi.
  void RecordWriteArray(
      Register array,
      Register value,
      Register index,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  void RecordWriteForMap(
      Register object,
      Register map,
      Register dst,
      SaveFPRegsMode save_fp);

  // For the page containing |object|, mark the region covering |address|
  // dirty. |object| is the object being stored into, |value| is the
  // object being stored. The address and value registers are clobbered by the
  // operation. RecordWrite filters out smis so it does not update
  // the write barrier if the value is a smi.
  void RecordWrite(
      Register object,
      Register address,
      Register value,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);
  // ---------------------------------------------------------------------------
  // Debugger Support

  void DebugBreak();

  // Generates function and stub prologue code.
  void StubPrologue();
  void Prologue(bool code_pre_aging);

  // Enter specific kind of exit frame; either in normal or
  // debug mode. Expects the number of arguments in register rax and
  // sets up the number of arguments in register rdi and the pointer
  // to the first argument in register rsi.
  //
  // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
  // accessible via StackSpaceOperand.
  void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false);

  // Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize
  // memory (not GCed) on the stack accessible via StackSpaceOperand.
  void EnterApiExitFrame(int arg_stack_space);

  // Leave the current exit frame. Expects/provides the return value in
  // register rax:rdx (untouched) and the pointer to the first
  // argument in register rsi.
  void LeaveExitFrame(bool save_doubles = false);

  // Leave the current exit frame. Expects/provides the return value in
  // register rax (untouched).
  void LeaveApiExitFrame(bool restore_context);

  // Push and pop the registers that can hold pointers.
  void PushSafepointRegisters() { Pushad(); }
  void PopSafepointRegisters() { Popad(); }
  // Store the value in register src in the safepoint register stack
  // slot for register dst.
  void StoreToSafepointRegisterSlot(Register dst, const Immediate& imm);
  void StoreToSafepointRegisterSlot(Register dst, Register src);
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    Move(kRootRegister, roots_array_start);
    addp(kRootRegister, Immediate(kRootRegisterBias));
  }
  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Register function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id,
                     InvokeFlag flag,
                     const CallWrapper& call_wrapper = NullCallWrapper());

  // Store the function for the given builtin in the target register.
  void GetBuiltinFunction(Register target, Builtins::JavaScript id);

  // Store the code object for the given builtin in the target register.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);
  // ---------------------------------------------------------------------------
  // Smi tagging, untagging and operations on tagged smis.

  // Support for constant splitting.
  bool IsUnsafeInt(const int32_t x);
  void SafeMove(Register dst, Smi* src);
  void SafePush(Smi* src);

  // Conversions between tagged smi values and non-tagged integer values.

  // Tag an integer value. The result must be known to be a valid smi value.
  // Only uses the low 32 bits of the src register. Sets the N and Z flags
  // based on the value of the resulting smi.
  void Integer32ToSmi(Register dst, Register src);
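  // A sketch of the common case: with 32-bit smi payloads, tagging is a
  // move plus a shift, so Integer32ToSmi(rax, rbx) emits roughly
  //   movl(rax, rbx);
  //   shlp(rax, Immediate(kSmiShift));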
  // Stores an integer32 value into a memory field that already holds a smi.
  void Integer32ToSmiField(const Operand& dst, Register src);

  // Adds constant to src and tags the result as a smi.
  // Result must be a valid smi.
  void Integer64PlusConstantToSmi(Register dst, Register src, int constant);

  // Convert smi to 32-bit integer. I.e., not sign extended into
  // high 32 bits of destination.
  void SmiToInteger32(Register dst, Register src);
  void SmiToInteger32(Register dst, const Operand& src);

  // Convert smi to 64-bit integer (sign extended if necessary).
  void SmiToInteger64(Register dst, Register src);
  void SmiToInteger64(Register dst, const Operand& src);

  // Multiply a positive smi's integer value by a power of two.
  // Provides result as 64-bit integer value.
  void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                             Register src,
                                             int power);

  // Divide a positive smi's integer value by a power of two.
  // Provides result as 32-bit integer value.
  void PositiveSmiDivPowerOfTwoToInteger32(Register dst,
                                           Register src,
                                           int power);

  // Perform the logical or of two smi values and return a smi value.
  // If either argument is not a smi, jump to on_not_smis and retain
  // the original values of source registers. The destination register
  // may be changed if it's not one of the source registers.
  void SmiOrIfSmis(Register dst,
                   Register src1,
                   Register src2,
                   Label* on_not_smis,
                   Label::Distance near_jump = Label::kFar);
  // Simple comparison of smis. Both sides must be known smis to use these,
  // otherwise use Cmp.
  void SmiCompare(Register smi1, Register smi2);
  void SmiCompare(Register dst, Smi* src);
  void SmiCompare(Register dst, const Operand& src);
  void SmiCompare(const Operand& dst, Register src);
  void SmiCompare(const Operand& dst, Smi* src);
  // Compare the int32 in src register to the value of the smi stored at dst.
  void SmiCompareInteger32(const Operand& dst, Register src);
  // Sets sign and zero flags depending on value of smi in register.
  void SmiTest(Register src);

  // Functions performing a check on a known or potential smi. Returns
  // a condition that is satisfied if the check is successful.

  // Is the value a tagged smi.
  Condition CheckSmi(Register src);
  Condition CheckSmi(const Operand& src);

  // Is the value a non-negative tagged smi.
  Condition CheckNonNegativeSmi(Register src);

  // Are both values tagged smis.
  Condition CheckBothSmi(Register first, Register second);

  // Are both values non-negative tagged smis.
  Condition CheckBothNonNegativeSmi(Register first, Register second);

  // Is either value a tagged smi.
  Condition CheckEitherSmi(Register first,
                           Register second,
                           Register scratch = kScratchRegister);

  // Checks whether a 32-bit integer value is valid for conversion
  // to a smi.
  Condition CheckInteger32ValidSmiValue(Register src);

  // Checks whether a 32-bit unsigned integer value is valid for
  // conversion to a smi.
  Condition CheckUInteger32ValidSmiValue(Register src);

  // Check whether src is a Smi, and set dst to zero if it is a smi,
  // and to one if it isn't.
  void CheckSmiToIndicator(Register dst, Register src);
  void CheckSmiToIndicator(Register dst, const Operand& src);
  // Test-and-jump functions. Typically combines a check function
  // above with a conditional jump.

  // Jump if the value can be represented by a smi.
  void JumpIfValidSmiValue(Register src, Label* on_valid,
                           Label::Distance near_jump = Label::kFar);

  // Jump if the value cannot be represented by a smi.
  void JumpIfNotValidSmiValue(Register src, Label* on_invalid,
                              Label::Distance near_jump = Label::kFar);

  // Jump if the unsigned integer value can be represented by a smi.
  void JumpIfUIntValidSmiValue(Register src, Label* on_valid,
                               Label::Distance near_jump = Label::kFar);

  // Jump if the unsigned integer value cannot be represented by a smi.
  void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
                                  Label::Distance near_jump = Label::kFar);

  // Jump to label if the value is a tagged smi.
  void JumpIfSmi(Register src,
                 Label* on_smi,
                 Label::Distance near_jump = Label::kFar);

  // Jump to label if the value is not a tagged smi.
  void JumpIfNotSmi(Register src,
                    Label* on_not_smi,
                    Label::Distance near_jump = Label::kFar);
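  // Example (a sketch): guard a fast path that requires a smi.
  //   JumpIfNotSmi(rax, &slow_path);
  //   SmiToInteger32(rax, rax);  // Safe here: rax is known to be a smi.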
  // Jump to label if the value is not a non-negative tagged smi.
  void JumpUnlessNonNegativeSmi(Register src,
                                Label* on_not_smi,
                                Label::Distance near_jump = Label::kFar);

  // Jump to label if the value, which must be a tagged smi, has value equal
  // to the constant.
  void JumpIfSmiEqualsConstant(Register src,
                               Smi* constant,
                               Label* on_equals,
                               Label::Distance near_jump = Label::kFar);

  // Jump if either or both registers are not smi values.
  void JumpIfNotBothSmi(Register src1,
                        Register src2,
                        Label* on_not_both_smi,
                        Label::Distance near_jump = Label::kFar);

  // Jump if either or both registers are not non-negative smi values.
  void JumpUnlessBothNonNegativeSmi(Register src1, Register src2,
                                    Label* on_not_both_smi,
                                    Label::Distance near_jump = Label::kFar);
  // Operations on tagged smi values.

  // Smis represent a subset of integers. The subset is always equivalent to
  // a two's complement interpretation of a fixed number of bits.

  // Add an integer constant to a tagged smi, giving a tagged smi as result.
  // No overflow testing on the result is done.
  void SmiAddConstant(Register dst, Register src, Smi* constant);

  // Add an integer constant to a tagged smi, giving a tagged smi as result.
  // No overflow testing on the result is done.
  void SmiAddConstant(const Operand& dst, Smi* constant);

  // Add an integer constant to a tagged smi, giving a tagged smi as result,
  // or jumping to a label if the result cannot be represented by a smi.
  void SmiAddConstant(Register dst, Register src, Smi* constant,
                      SmiOperationConstraints constraints, Label* bailout_label,
                      Label::Distance near_jump = Label::kFar);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result. No testing on the result is done. Sets the N and Z flags
  // based on the value of the resulting integer.
  void SmiSubConstant(Register dst, Register src, Smi* constant);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result, or jumping to a label if the result cannot be represented by a smi.
  void SmiSubConstant(Register dst, Register src, Smi* constant,
                      SmiOperationConstraints constraints, Label* bailout_label,
                      Label::Distance near_jump = Label::kFar);

  // Negating a smi can give a negative zero or a too-large positive value.
  // NOTICE: This operation jumps on success, not failure!
  void SmiNeg(Register dst,
              Register src,
              Label* on_smi_result,
              Label::Distance near_jump = Label::kFar);
  // Adds smi values and return the result as a smi.
  // If dst is src1, then src1 will be destroyed if the operation is
  // successful, otherwise kept intact.
  void SmiAdd(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);
  void SmiAdd(Register dst,
              Register src1,
              const Operand& src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);

  void SmiAdd(Register dst,
              Register src1,
              Register src2);

  // Subtracts smi values and return the result as a smi.
  // If dst is src1, then src1 will be destroyed if the operation is
  // successful, otherwise kept intact.
  void SmiSub(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);
  void SmiSub(Register dst,
              Register src1,
              const Operand& src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);

  void SmiSub(Register dst,
              Register src1,
              Register src2);

  void SmiSub(Register dst,
              Register src1,
              const Operand& src2);

  // Multiplies smi values and returns the result as a smi,
  // if possible.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiMul(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);

  // Divides one smi by another and returns the quotient.
  // Clobbers rax and rdx registers.
  void SmiDiv(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);

  // Divides one smi by another and returns the remainder.
  // Clobbers rax and rdx registers.
  void SmiMod(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);
  // Bitwise operations.
  void SmiNot(Register dst, Register src);
  void SmiAnd(Register dst, Register src1, Register src2);
  void SmiOr(Register dst, Register src1, Register src2);
  void SmiXor(Register dst, Register src1, Register src2);
  void SmiAndConstant(Register dst, Register src1, Smi* constant);
  void SmiOrConstant(Register dst, Register src1, Smi* constant);
  void SmiXorConstant(Register dst, Register src1, Smi* constant);

  void SmiShiftLeftConstant(Register dst,
                            Register src,
                            int shift_value,
                            Label* on_not_smi_result = NULL,
                            Label::Distance near_jump = Label::kFar);
  void SmiShiftLogicalRightConstant(Register dst,
                                    Register src,
                                    int shift_value,
                                    Label* on_not_smi_result,
                                    Label::Distance near_jump = Label::kFar);
  void SmiShiftArithmeticRightConstant(Register dst,
                                       Register src,
                                       int shift_value);

  // Shifts a smi value to the left, and returns the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLeft(Register dst,
                    Register src1,
                    Register src2,
                    Label* on_not_smi_result = NULL,
                    Label::Distance near_jump = Label::kFar);
  // Shifts a smi value to the right, shifting in zero bits at the top, and
  // returns the unsigned interpretation of the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLogicalRight(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump = Label::kFar);
  // Shifts a smi value to the right, sign extending the top, and
  // returns the signed interpretation of the result. That will always
  // be a valid smi value, since it's numerically smaller than the
  // original.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftArithmeticRight(Register dst,
                               Register src1,
                               Register src2);
  // Specialized operations

  // Select the non-smi register of two registers where exactly one is a
  // smi. If neither is a smi, jump to the failure label.
  void SelectNonSmi(Register dst,
                    Register src1,
                    Register src2,
                    Label* on_not_smis,
                    Label::Distance near_jump = Label::kFar);

  // Converts, if necessary, a smi to a combination of number and
  // multiplier to be used as a scaled index.
  // The src register contains a *positive* smi value. The shift is the
  // power of two to multiply the index value by (e.g.
  // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
  // The returned index register may be either src or dst, depending
  // on what is most efficient. If src and dst are different registers,
  // src is always unchanged.
  SmiIndex SmiToIndex(Register dst, Register src, int shift);
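  // Example (a sketch): index a FixedArray by the positive smi in rbx.
  //   SmiIndex index = SmiToIndex(rdx, rbx, kPointerSizeLog2);
  //   movp(rax, FieldOperand(rcx, index.reg, index.scale,
  //                          FixedArray::kHeaderSize));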
  // Converts a positive smi to a negative index.
  SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);

  // Add the value of a smi in memory to an int32 register.
  // Sets flags as a normal add.
  void AddSmiField(Register dst, const Operand& src);

  // Basic Smi operations.
  void Move(Register dst, Smi* source) {
    LoadSmiConstant(dst, source);
  }

  void Move(const Operand& dst, Smi* source) {
    Register constant = GetSmiConstant(source);
    movp(dst, constant);
  }

  // Save away a raw integer with pointer size on the stack as two integers
  // masquerading as smis so that the garbage collector skips visiting them.
  void PushRegisterAsTwoSmis(Register src, Register scratch = kScratchRegister);
  // Reconstruct a raw integer with pointer size from two integers masquerading
  // as smis on the top of stack.
  void PopRegisterAsTwoSmis(Register dst, Register scratch = kScratchRegister);

  void Test(const Operand& dst, Smi* source);
  // ---------------------------------------------------------------------------
  // String macros.

  // Generate code to do a lookup in the number string cache. If the number in
  // the register object is found in the cache the generated code falls through
  // with the result in the result register. The object and the result register
  // can be the same. If the number is not found in the cache the code jumps to
  // the label not_found with only the content of register object unchanged.
  void LookupNumberStringCache(Register object,
                               Register result,
                               Register scratch1,
                               Register scratch2,
                               Label* not_found);

  // If object is a string, its map is loaded into object_map.
  void JumpIfNotString(Register object,
                       Register object_map,
                       Label* not_string,
                       Label::Distance near_jump = Label::kFar);

  void JumpIfNotBothSequentialOneByteStrings(
      Register first_object, Register second_object, Register scratch1,
      Register scratch2, Label* on_not_both_flat_one_byte,
      Label::Distance near_jump = Label::kFar);

  // Check whether the instance type represents a flat one-byte string. Jump
  // to the label if not. If the instance type can be scratched, specify the
  // same register for both instance type and scratch.
  void JumpIfInstanceTypeIsNotSequentialOneByte(
      Register instance_type, Register scratch,
      Label* on_not_flat_one_byte_string,
      Label::Distance near_jump = Label::kFar);

  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* on_fail,
      Label::Distance near_jump = Label::kFar);

  void EmitSeqStringSetCharCheck(Register string,
                                 Register index,
                                 Register value,
                                 uint32_t encoding_mask);

  // Checks if the given register or operand is a unique name.
  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
                                       Label::Distance distance = Label::kFar);
  void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
                                       Label::Distance distance = Label::kFar);
  // ---------------------------------------------------------------------------
  // Macro instructions.

  // Load/store with specific representation.
  void Load(Register dst, const Operand& src, Representation r);
  void Store(const Operand& dst, Register src, Representation r);

  // Load a register with a long value as efficiently as possible.
  void Set(Register dst, int64_t x);
  void Set(const Operand& dst, intptr_t x);

  // The cvtsi2sd instruction only writes to the low 64 bits of the dst
  // register, which hinders register renaming and makes dependence chains
  // longer. So we use xorps to clear the dst register before cvtsi2sd to
  // solve this issue.
  void Cvtlsi2sd(XMMRegister dst, Register src);
  void Cvtlsi2sd(XMMRegister dst, const Operand& src);
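  // Roughly, Cvtlsi2sd(xmm0, rax) emits the pair
  //   xorps(xmm0, xmm0);
  //   cvtlsi2sd(xmm0, rax);
  // breaking the false dependence on xmm0's previous contents.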
  // Move if the registers are not identical.
  void Move(Register target, Register source);

  // TestBit and Load SharedFunctionInfo special field.
  void TestBitSharedFunctionInfoSpecialField(Register base,
                                             int offset,
                                             int bit_index);
  void LoadSharedFunctionInfoSpecialField(Register dst,
                                          Register base,
                                          int offset);

  void Move(Register dst, Handle<Object> source);
  void Move(const Operand& dst, Handle<Object> source);
  void Cmp(Register dst, Handle<Object> source);
  void Cmp(const Operand& dst, Handle<Object> source);
  void Cmp(Register dst, Smi* src);
  void Cmp(const Operand& dst, Smi* src);
  void Push(Handle<Object> source);

  // Load a heap object and handle the case of new-space objects by
  // indirecting via a global cell.
  void MoveHeapObject(Register result, Handle<Object> object);

  // Load a global cell into a register.
  void LoadGlobalCell(Register dst, Handle<Cell> cell);

  // Compare the given value and the value of the weak cell.
  void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);

  void GetWeakValue(Register value, Handle<WeakCell> cell);

  // Load the value of the weak cell in the value register. Branch to the given
  // miss label if the weak cell was cleared.
  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the rsp register.
  void Drop(int stack_elements);
  // Emit code to discard a positive number of pointer-sized elements
  // from the stack under the return address, which remains on the top,
  // clobbering the rsp register.
  void DropUnderReturnAddress(int stack_elements,
                              Register scratch = kScratchRegister);
  void Call(Label* target) { call(target); }
  void Push(Register src);
  void Push(const Operand& src);
  void PushQuad(const Operand& src);
  void Push(Immediate value);
  void PushImm32(int32_t imm32);
  void Pop(Register dst);
  void Pop(const Operand& dst);
  void PopQuad(const Operand& dst);
  void PushReturnAddressFrom(Register src) { pushq(src); }
  void PopReturnAddressTo(Register dst) { popq(dst); }
  void Move(Register dst, ExternalReference ext) {
    movp(dst, reinterpret_cast<void*>(ext.address()),
         RelocInfo::EXTERNAL_REFERENCE);
  }

  // Loads a pointer into a register with a relocation mode.
  void Move(Register dst, void* ptr, RelocInfo::Mode rmode) {
    // This method must not be used with heap object references. The stored
    // address is not GC safe. Use the handle version instead.
    DCHECK(rmode > RelocInfo::LAST_GCED_ENUM);
    movp(dst, ptr, rmode);
  }

  void Move(Register dst, Handle<Object> value, RelocInfo::Mode rmode) {
    AllowDeferredHandleDereference using_raw_address;
    DCHECK(!RelocInfo::IsNone(rmode));
    DCHECK(value->IsHeapObject());
    DCHECK(!isolate()->heap()->InNewSpace(*value));
    movp(dst, reinterpret_cast<void*>(value.location()), rmode);
  }

  void Move(XMMRegister dst, uint32_t src);
  void Move(XMMRegister dst, uint64_t src);
  void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
  void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
  void Jump(Address destination, RelocInfo::Mode rmode);
  void Jump(ExternalReference ext);
  void Jump(const Operand& op);
  void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);

  void Call(Address destination, RelocInfo::Mode rmode);
  void Call(ExternalReference ext);
  void Call(const Operand& op);
  void Call(Handle<Code> code_object,
            RelocInfo::Mode rmode,
            TypeFeedbackId ast_id = TypeFeedbackId::None());

  // The size of the code generated for different call instructions.
  int CallSize(Address destination) {
    return kCallSequenceLength;
  }
  int CallSize(ExternalReference ext);
  int CallSize(Handle<Code> code_object) {
    // Code calls use 32-bit relative addressing.
    return kShortCallInstructionLength;
  }
  int CallSize(Register target) {
    // Opcode: REX_opt FF /2 m64
    return (target.high_bit() != 0) ? 3 : 2;
  }
  int CallSize(const Operand& target) {
    // Opcode: REX_opt FF /2 m64
    return (target.requires_rex() ? 2 : 1) + target.operand_size();
  }

  // Emit call to the code we are currently generating.
  void CallSelf() {
    Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
    Call(self, RelocInfo::CODE_TARGET);
  }
  // Non-SSE2 instructions.
  void Pextrd(Register dst, XMMRegister src, int8_t imm8);
  void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
  void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);

  void Lzcntl(Register dst, Register src);
  void Lzcntl(Register dst, const Operand& src);

  // Non-x64 instructions.
  // Push/pop all general purpose registers.
  // Does not push rsp/rbp nor any of the assembler's special purpose registers
  // (kScratchRegister, kRootRegister).
  void Pushad();
  void Popad();
  // Sets the stack as after performing Popad, without actually loading the
  // registers.
  void Dropad();
  // Compare object type for heap object.
  // Always use unsigned comparisons: above and below, not less and greater.
  // Incoming register is heap_object and outgoing register is map.
  // They may be the same register, and may be kScratchRegister.
  void CmpObjectType(Register heap_object, InstanceType type, Register map);

  // Compare instance type for map.
  // Always use unsigned comparisons: above and below, not less and greater.
  void CmpInstanceType(Register map, InstanceType type);

  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map,
                         Label* fail,
                         Label::Distance distance = Label::kFar);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,
                               Label* fail,
                               Label::Distance distance = Label::kFar);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements. Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map,
                            Label* fail,
                            Label::Distance distance = Label::kFar);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by index in
  // the FastDoubleElements array elements, otherwise jump to fail. Note that
  // index must not be smi-tagged.
  void StoreNumberToDoubleElements(Register maybe_number,
                                   Register elements,
                                   Register index,
                                   XMMRegister xmm_scratch,
                                   Label* fail,
                                   int elements_offset = 0);
  // Compare an object's map with the specified map.
  void CompareMap(Register obj, Handle<Map> map);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);

  // Check if the map of an object is equal to a specified weak map and branch
  // to a specified target if equal. Skip the smi check if not required
  // (object is known to be a heap object).
  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
                       Handle<WeakCell> cell, Handle<Code> success,
                       SmiCheckType smi_check_type);

  // Check if the object in register heap_object is a string. Afterwards the
  // register map contains the object map and the register instance_type
  // contains the instance_type. The registers map and instance_type can be the
  // same, in which case it contains the instance type afterwards. Either of the
  // registers map and instance_type can be the same as heap_object.
  Condition IsObjectStringType(Register heap_object,
                               Register map,
                               Register instance_type);

  // Check if the object in register heap_object is a name. Afterwards the
  // register map contains the object map and the register instance_type
  // contains the instance_type. The registers map and instance_type can be the
  // same, in which case it contains the instance type afterwards. Either of the
  // registers map and instance_type can be the same as heap_object.
  Condition IsObjectNameType(Register heap_object,
                             Register map,
                             Register instance_type);
  // FCmp compares and pops the two values on top of the FPU stack.
  // The flag results are similar to integer cmp, but requires unsigned
  // jcc instructions (je, ja, jae, jb, jbe, and jz).
  void FCmp();

  void ClampUint8(Register reg);

  void ClampDoubleToUint8(XMMRegister input_reg,
                          XMMRegister temp_xmm_reg,
                          Register result_reg);

  void SlowTruncateToI(Register result_reg, Register input_reg,
                       int offset = HeapNumber::kValueOffset - kHeapObjectTag);

  void TruncateHeapNumberToI(Register result_reg, Register input_reg);
  void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);

  void DoubleToI(Register result_reg, XMMRegister input_reg,
                 XMMRegister scratch, MinusZeroMode minus_zero_mode,
                 Label* lost_precision, Label* is_nan, Label* minus_zero,
                 Label::Distance dst = Label::kFar);

  void LoadUint32(XMMRegister dst, Register src);

  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLength(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
  void LoadAccessor(Register dst, Register holder, int accessor_index,
                    AccessorComponent accessor);
  template<typename Field>
  void DecodeField(Register reg) {
    static const int shift = Field::kShift;
    static const int mask = Field::kMask >> Field::kShift;
    if (shift != 0) {
      shrp(reg, Immediate(shift));
    }
    andp(reg, Immediate(mask));
  }

  template<typename Field>
  void DecodeFieldToSmi(Register reg) {
    if (SmiValuesAre32Bits()) {
      andp(reg, Immediate(Field::kMask));
      shlp(reg, Immediate(kSmiShift - Field::kShift));
    } else {
      static const int shift = Field::kShift;
      static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize;
      DCHECK(SmiValuesAre31Bits());
      DCHECK(kSmiShift == kSmiTagSize);
      DCHECK((mask & 0x80000000u) == 0);
      if (shift < kSmiShift) {
        shlp(reg, Immediate(kSmiShift - shift));
      } else if (shift > kSmiShift) {
        sarp(reg, Immediate(shift - kSmiShift));
      }
      andp(reg, Immediate(mask));
    }
  }
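  // Example (a sketch): extract the elements kind from a map's bit field 2.
  //   movzxbp(rcx, FieldOperand(rbx, Map::kBitField2Offset));
  //   DecodeField<Map::ElementsKindBits>(rcx);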
  // Abort execution if argument is not a number, enabled via --debug-code.
  void AssertNumber(Register object);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);

  // Abort execution if argument is not a smi, enabled via --debug-code.
  void AssertSmi(Register object);
  void AssertSmi(const Operand& object);

  // Abort execution if a 64 bit register containing a 32 bit payload does not
  // have zeros in the top 32 bits, enabled via --debug-code.
  void AssertZeroExtended(Register reg);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object);

  // Abort execution if argument is not the root value with the given index,
  // enabled via --debug-code.
  void AssertRootValue(Register src,
                       Heap::RootListIndex root_value_index,
                       BailoutReason reason);
  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new stack handler and link it into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  void PopStackHandler();

  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, but the scratch register and kScratchRegister,
  // which must be different, are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);

  void GetNumberHash(Register r0, Register scratch);

  void LoadFromNumberDictionary(Label* miss,
                                Register elements,
                                Register key,
                                Register r0,
                                Register r1,
                                Register r2,
                                Register result);
  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space or old space. If the given space
  // is exhausted control continues at the gc_required label. The allocated
  // object is returned in result and end of the new object is returned in
  // result_end. The register scratch can be passed as no_reg in which case
  // an additional object reference will be added to the reloc info. The
  // returned pointers in result and result_end have not yet been tagged as
  // heap objects. If result_contains_top_on_entry is true the content of
  // result is known to be the allocation top on entry (could be result_end
  // from a previous call). If result_contains_top_on_entry is true scratch
  // should be no_reg as it is never used.
  void Allocate(int object_size,
                Register result,
                Register result_end,
                Register scratch,
                Label* gc_required,
                AllocationFlags flags);

  void Allocate(int header_size,
                ScaleFactor element_size,
                Register element_count,
                Register result,
                Register result_end,
                Register scratch,
                Label* gc_required,
                AllocationFlags flags);

  void Allocate(Register object_size,
                Register result,
                Register result_end,
                Register scratch,
                Label* gc_required,
                AllocationFlags flags);

  // Allocate a heap number in new space with undefined value. Returns
  // tagged pointer in result register, or jumps to gc_required if new
  // space is full.
  void AllocateHeapNumber(Register result,
                          Register scratch,
                          Label* gc_required,
                          MutableMode mode = IMMUTABLE);

  // Allocate a sequential string. All the header fields of the string object
  // are initialized.
  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateOneByteString(Register result, Register length,
                             Register scratch1, Register scratch2,
                             Register scratch3, Label* gc_required);

  // Allocate a raw cons string object. Only the map field of the result is
  // initialized.
  void AllocateTwoByteConsString(Register result,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateOneByteConsString(Register result, Register scratch1,
                                 Register scratch2, Label* gc_required);

  // Allocate a raw sliced string object. Only the map field of the result is
  // initialized.
  void AllocateTwoByteSlicedString(Register result,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  void AllocateOneByteSlicedString(Register result, Register scratch1,
                                   Register scratch2, Label* gc_required);
  // ---------------------------------------------------------------------------
  // Support functions.

  // Check if result is zero and op is negative.
  void NegativeZeroTest(Register result, Register op, Label* then_label);

  // Check if result is zero and op is negative in code using jump targets.
  void NegativeZeroTest(CodeGenerator* cgen,
                        Register result,
                        Register op,
                        JumpTarget* then_target);

  // Check if result is zero and any of op1 and op2 are negative.
  // Register scratch is destroyed, and it must be different from op2.
  void NegativeZeroTest(Register result, Register op1, Register op2,
                        Register scratch, Label* then_label);

  // Machine code version of Map::GetConstructor().
  // |temp| holds |result|'s map when done.
  void GetMapConstructor(Register result, Register map, Register temp);

  // Try to get the function prototype of a function and put the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other register may be
  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Label* miss,
                               bool miss_on_bound_function = false);

  // Picks out an array index from the hash field.
  // Register use:
  //   hash - holds the index's hash. Clobbered.
  //   index - holds the overwritten index on exit.
  void IndexFromHash(Register hash, Register index);

  // Find the function context up the context chain.
  void LoadContext(Register dst, int context_chain_length);

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch,
      Label* no_map_match);

  // Load the global function with the given index.
  void LoadGlobalFunction(int index, Register function);

  // Load the initial map from the global function. The registers
  // function and map can be the same.
  void LoadGlobalFunctionInitialMap(Register function, Register map);
  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub);

  // Return from a code stub after popping its arguments.
  void StubReturn(int argc);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  // Call a runtime function and save the value of XMM registers.
  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
  }

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments);

  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of arguments.
  void TailCallExternalReference(const ExternalReference& ext,
                                 int num_arguments,
                                 int result_size);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid,
                       int num_arguments,
                       int result_size);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& ext, int result_size);

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, arguments must be stored in rsp[0], rsp[8],
  // etc., not pushed. The argument count assumes all arguments are word sized.
  // The number of slots reserved for arguments depends on platform. On Windows
  // stack slots are reserved for the arguments passed in registers. On other
  // platforms stack slots are only reserved for the arguments actually passed
  // to the C function.
  void PrepareCallCFunction(int num_arguments);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
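  // Example (a sketch; some_function is a placeholder, not a real
  // ExternalReference): call a two-argument C function.
  //   PrepareCallCFunction(2);
  //   LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
  //   movp(arg_reg_2, rax);
  //   CallCFunction(ExternalReference::some_function(isolate()), 2);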
  // Calculate the number of stack slots to reserve for arguments when calling
  // a C function.
  int ArgumentStackSlotsForCFunctionCall(int num_arguments);

  // ---------------------------------------------------------------------------
  // Utilities

  void Ret();

  // Return and drop arguments from stack, where the number of arguments
  // may be bigger than 2^16 - 1. Requires a scratch register.
  void Ret(int bytes_dropped, Register scratch);

  Handle<Object> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }

  // Copy length bytes from source to destination.
  // Uses scratch register internally (if you have a low-eight register
  // free, do use it, otherwise kScratchRegister will be used).
  // The min_length is a minimum limit on the value that length will have.
  // The algorithm has some special cases that might be omitted if the string
  // is known to always be long.
  void CopyBytes(Register destination,
                 Register source,
                 Register length,
                 int min_length = 0,
                 Register scratch = kScratchRegister);

  // Initialize fields with filler values. Fields starting at |start_offset|
  // up to but not including |end_offset| are overwritten with the value in
  // |filler|. At the end of the loop, |start_offset| takes the value of
  // |end_offset|.
  void InitializeFieldsWithFiller(Register start_offset,
                                  Register end_offset,
                                  Register filler);

  // Emit code for a truncating division by a constant. The dividend register is
  // unchanged, the result is in rdx, and rax gets clobbered.
  void TruncatingDiv(Register dividend, int32_t divisor);
  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value);
  void IncrementCounter(StatsCounter* counter, int value);
  void DecrementCounter(StatsCounter* counter, int value);

  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, BailoutReason reason);

  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cc, BailoutReason reason);

  // Print a message to stdout and abort execution.
  void Abort(BailoutReason msg);

  // Check that the stack is aligned.
  void CheckStackAlignment();

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);
  static int SafepointRegisterStackIndex(Register reg) {
    return SafepointRegisterStackIndex(reg.code());
  }

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
  void LeaveFrame(StackFrame::Type type);

  // Expects object in rax and returns map with validated enum cache
  // in rax. Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Register null_value,
                      Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated
  // AllocationMemento object that can be checked for in order to pretransition
  // to another type.
  // On entry, receiver_reg should point to the array object.
  // scratch_reg gets clobbered.
  // If allocation info is present, condition flags are set to equal.
  void TestJSArrayForAllocationMemento(Register receiver_reg,
                                       Register scratch_reg,
                                       Label* no_memento_found);

  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                         Register scratch_reg,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
                                    &no_memento_found);
    j(equal, memento_found);
    bind(&no_memento_found);
  }

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);
 private:
  // Order general registers are pushed by Pushad:
  // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
  static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
  static const int kNumSafepointSavedRegisters = 12;
  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;

  bool generating_stub_;
  bool has_frame_;
  bool root_array_available_;

  // Returns a register holding the smi value. The register MUST NOT be
  // modified. It may be the "smi 1 constant" register.
  Register GetSmiConstant(Smi* value);

  int64_t RootRegisterDelta(ExternalReference other);

  // Moves the smi value to the destination register.
  void LoadSmiConstant(Register dst, Smi* value);

  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_register,
                      Label* done,
                      bool* definitely_mismatches,
                      InvokeFlag flag,
                      Label::Distance near_jump = Label::kFar,
                      const CallWrapper& call_wrapper = NullCallWrapper());

  void EnterExitFramePrologue(bool save_rax);

  // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
  // accessible via StackSpaceOperand.
  void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);

  void LeaveExitFrameEpilogue(bool restore_context);

  // Allocation support helpers.
  // Loads the top of new-space into the result register.
  // Otherwise the address of the new-space top is loaded into scratch (if
  // scratch is valid), and the new-space top is loaded into result.
  void LoadAllocationTopHelper(Register result,
                               Register scratch,
                               AllocationFlags flags);

  void MakeSureDoubleAlignedHelper(Register result,
                                   Register scratch,
                                   Label* gc_required,
                                   AllocationFlags flags);

  // Update allocation top with value in result_end register.
  // If scratch is valid, it contains the address of the allocation top.
  void UpdateAllocationTopHelper(Register result_end,
                                 Register scratch,
                                 AllocationFlags flags);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cc,
                  Label* branch,
                  Label::Distance distance = Label::kFar);

  // Helper for finding the mark bits for an address. Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // the position of the first bit. Uses rcx as scratch and leaves addr_reg
  // unchanged.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);

  // Compute memory operands for safepoint stack slots.
  Operand SafepointRegisterSlot(Register reg);
  static int SafepointRegisterStackIndex(int reg_code) {
    return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
  }

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher BASE_EMBEDDED {
 public:
  CodePatcher(byte* address, int size);
  ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;        // The address of the code being patched.
  int size_;             // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};
// -----------------------------------------------------------------------------
// Static helper functions.

// Generate an Operand for loading a field from an object.
inline Operand FieldOperand(Register object, int offset) {
  return Operand(object, offset - kHeapObjectTag);
}
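// Example (a sketch): load a JSObject's elements backing store.
//   movp(rax, FieldOperand(rbx, JSObject::kElementsOffset));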
// Generate an Operand for loading an indexed field from an object.
inline Operand FieldOperand(Register object,
                            Register index,
                            ScaleFactor scale,
                            int offset) {
  return Operand(object, index, scale, offset - kHeapObjectTag);
}

inline Operand ContextOperand(Register context, int index) {
  return Operand(context, Context::SlotOffset(index));
}

inline Operand ContextOperand(Register context, Register index) {
  return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
}

inline Operand GlobalObjectOperand() {
  return ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX);
}

// Provides access to exit frame stack space (not GCed).
inline Operand StackSpaceOperand(int index) {
#ifdef _WIN64
  const int kShadowSpace = 4;
  return Operand(rsp, (index + kShadowSpace) * kPointerSize);
#else
  return Operand(rsp, index * kPointerSize);
#endif
}

inline Operand StackOperandForReturnAddress(int32_t disp) {
  return Operand(rsp, disp);
}
#ifdef GENERATED_CODE_COVERAGE
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) {                                                  \
    Address x64_coverage_function = FUNCTION_ADDR(LogGeneratedCodeCoverage); \
    masm->pushfq();                                                          \
    masm->Pushad();                                                          \
    masm->Push(Immediate(reinterpret_cast<int>(&__FILE_LINE__)));            \
    masm->Call(x64_coverage_function, RelocInfo::EXTERNAL_REFERENCE);        \
    masm->Pop(kScratchRegister);                                             \
    masm->Popad();                                                           \
    masm->popfq();                                                           \
  }                                                                          \
  masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
} }  // namespace v8::internal

#endif  // V8_X64_MACRO_ASSEMBLER_X64_H_