1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #ifndef V8_ARM64_INSTRUCTIONS_ARM64_H_
29 #define V8_ARM64_INSTRUCTIONS_ARM64_H_
33 #include "arm64/constants-arm64.h"
34 #include "arm64/utils-arm64.h"
40 // ISA constants. --------------------------------------------------------------
// A64 instructions are always 32 bits wide.
typedef uint32_t Instr;
44 // The following macros initialize a float/double variable with a bit pattern
45 // without using static initializers: If ARM64_DEFINE_FP_STATICS is defined, the
46 // symbol is defined as uint32_t/uint64_t initialized with the desired bit
47 // pattern. Otherwise, the same symbol is declared as an external float/double.
// If ARM64_DEFINE_FP_STATICS is defined, DEFINE_FLOAT/DEFINE_DOUBLE emit the
// defining declaration (an integer constant holding the bit pattern);
// otherwise they emit an extern declaration of the float/double symbol.
// The '#else' restores the two-branch structure the comment above describes;
// without it the second pair of #defines would redefine the first.
#if defined(ARM64_DEFINE_FP_STATICS)
#define DEFINE_FLOAT(name, value) extern const uint32_t name = value
#define DEFINE_DOUBLE(name, value) extern const uint64_t name = value
#else
#define DEFINE_FLOAT(name, value) extern const float name
#define DEFINE_DOUBLE(name, value) extern const double name
#endif  // defined(ARM64_DEFINE_FP_STATICS)
// IEEE-754 single- and double-precision infinities, as bit patterns.
DEFINE_FLOAT(kFP32PositiveInfinity, 0x7f800000);
DEFINE_FLOAT(kFP32NegativeInfinity, 0xff800000);
DEFINE_DOUBLE(kFP64PositiveInfinity, 0x7ff0000000000000UL);
DEFINE_DOUBLE(kFP64NegativeInfinity, 0xfff0000000000000UL);

// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
DEFINE_DOUBLE(kFP64SignallingNaN, 0x7ff000007f800001);
DEFINE_FLOAT(kFP32SignallingNaN, 0x7f800001);

// A similar value, but as a quiet NaN.
DEFINE_DOUBLE(kFP64QuietNaN, 0x7ff800007fc00001);
DEFINE_FLOAT(kFP32QuietNaN, 0x7fc00001);

// The default NaN values (for FPCR.DN=1).
DEFINE_DOUBLE(kFP64DefaultNaN, 0x7ff8000000000000UL);
DEFINE_FLOAT(kFP32DefaultNaN, 0x7fc00000);
// Maps a load/store-pair opcode to its per-register data size (defined out of
// line).
LSDataSize CalcLSPairDataSize(LoadStorePairOp op);
// Classification of the immediate-branch encodings. The intermediate members
// (CondBranchType, UncondBranchType, TestBranchType) were dropped by
// extraction; they are restored here, grounded by their use in
// Instruction::BranchType() below.
enum ImmBranchType {
  UnknownBranchType = 0,
  CondBranchType    = 1,
  UncondBranchType  = 2,
  CompareBranchType = 3,
  TestBranchType    = 4
};
// FP rounding modes. The enum header and the FPTieEven/FPZero/FPTieAway
// members were dropped by extraction; restored here. The first four values
// match the FPCR<RMode> encoding (0..3), as the comment states.
enum FPRounding {
  // The first four values are encodable directly by FPCR<RMode>.
  FPTieEven = 0x0,
  FPPositiveInfinity = 0x1,
  FPNegativeInfinity = 0x2,
  FPZero = 0x3,

  // The final rounding mode is only available when explicitly specified by the
  // instruction (such as with fcvta). It cannot be set in FPCR.
  FPTieAway
};
118 // Instructions. ---------------------------------------------------------------
122 V8_INLINE Instr InstructionBits() const {
123 return *reinterpret_cast<const Instr*>(this);
126 V8_INLINE void SetInstructionBits(Instr new_instr) {
127 *reinterpret_cast<Instr*>(this) = new_instr;
130 int Bit(int pos) const {
131 return (InstructionBits() >> pos) & 1;
134 uint32_t Bits(int msb, int lsb) const {
135 return unsigned_bitextract_32(msb, lsb, InstructionBits());
138 int32_t SignedBits(int msb, int lsb) const {
139 int32_t bits = *(reinterpret_cast<const int32_t*>(this));
140 return signed_bitextract_32(msb, lsb, bits);
143 Instr Mask(uint32_t mask) const {
144 return InstructionBits() & mask;
147 V8_INLINE Instruction* following(int count = 1) {
148 return InstructionAtOffset(count * static_cast<int>(kInstructionSize));
151 V8_INLINE Instruction* preceding(int count = 1) {
152 return following(-count);
155 #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
156 int64_t Name() const { return Func(HighBit, LowBit); }
157 INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
160 // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
161 // formed from ImmPCRelLo and ImmPCRelHi.
162 int ImmPCRel() const {
163 int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
164 int const width = ImmPCRelLo_width + ImmPCRelHi_width;
165 return signed_bitextract_32(width-1, 0, offset);
// Decodes the logical-immediate field of this instruction (defined out of
// line).
uint64_t ImmLogical();
172 LSDataSize SizeLSPair() const {
173 return CalcLSPairDataSize(
174 static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
178 bool IsCondBranchImm() const {
179 return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
182 bool IsUncondBranchImm() const {
183 return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
186 bool IsCompareBranch() const {
187 return Mask(CompareBranchFMask) == CompareBranchFixed;
190 bool IsTestBranch() const {
191 return Mask(TestBranchFMask) == TestBranchFixed;
194 bool IsLdrLiteral() const {
195 return Mask(LoadLiteralFMask) == LoadLiteralFixed;
198 bool IsLdrLiteralX() const {
199 return Mask(LoadLiteralMask) == LDR_x_lit;
202 bool IsPCRelAddressing() const {
203 return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
206 bool IsLogicalImmediate() const {
207 return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
210 bool IsAddSubImmediate() const {
211 return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
214 bool IsAddSubExtended() const {
215 return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
218 // Match any loads or stores, including pairs.
219 bool IsLoadOrStore() const {
220 return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
223 // Match any loads, including pairs.
225 // Match any stores, including pairs.
226 bool IsStore() const;
228 // Indicate whether Rd can be the stack pointer or the zero register. This
229 // does not check that the instruction actually has an Rd field.
230 Reg31Mode RdMode() const {
231 // The following instructions use csp or wsp as Rd:
232 // Add/sub (immediate) when not setting the flags.
233 // Add/sub (extended) when not setting the flags.
234 // Logical (immediate) when not setting the flags.
235 // Otherwise, r31 is the zero register.
236 if (IsAddSubImmediate() || IsAddSubExtended()) {
237 if (Mask(AddSubSetFlagsBit)) {
238 return Reg31IsZeroRegister;
240 return Reg31IsStackPointer;
243 if (IsLogicalImmediate()) {
244 // Of the logical (immediate) instructions, only ANDS (and its aliases)
245 // can set the flags. The others can all write into csp.
246 // Note that some logical operations are not available to
247 // immediate-operand instructions, so we have to combine two masks here.
248 if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
249 return Reg31IsZeroRegister;
251 return Reg31IsStackPointer;
254 return Reg31IsZeroRegister;
257 // Indicate whether Rn can be the stack pointer or the zero register. This
258 // does not check that the instruction actually has an Rn field.
259 Reg31Mode RnMode() const {
260 // The following instructions use csp or wsp as Rn:
261 // All loads and stores.
262 // Add/sub (immediate).
263 // Add/sub (extended).
264 // Otherwise, r31 is the zero register.
265 if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
266 return Reg31IsStackPointer;
268 return Reg31IsZeroRegister;
271 ImmBranchType BranchType() const {
272 if (IsCondBranchImm()) {
273 return CondBranchType;
274 } else if (IsUncondBranchImm()) {
275 return UncondBranchType;
276 } else if (IsCompareBranch()) {
277 return CompareBranchType;
278 } else if (IsTestBranch()) {
279 return TestBranchType;
281 return UnknownBranchType;
285 static int ImmBranchRangeBitwidth(ImmBranchType branch_type) {
286 switch (branch_type) {
287 case UncondBranchType:
288 return ImmUncondBranch_width;
290 return ImmCondBranch_width;
291 case CompareBranchType:
292 return ImmCmpBranch_width;
294 return ImmTestBranch_width;
301 // The range of the branch instruction, expressed as 'instr +- range'.
302 static int32_t ImmBranchRange(ImmBranchType branch_type) {
304 (1 << (ImmBranchRangeBitwidth(branch_type) + kInstructionSizeLog2)) / 2 -
308 int ImmBranch() const {
309 switch (BranchType()) {
310 case CondBranchType: return ImmCondBranch();
311 case UncondBranchType: return ImmUncondBranch();
312 case CompareBranchType: return ImmCmpBranch();
313 case TestBranchType: return ImmTestBranch();
314 default: UNREACHABLE();
319 bool IsBranchAndLinkToRegister() const {
320 return Mask(UnconditionalBranchToRegisterMask) == BLR;
323 bool IsMovz() const {
324 return (Mask(MoveWideImmediateMask) == MOVZ_x) ||
325 (Mask(MoveWideImmediateMask) == MOVZ_w);
328 bool IsMovk() const {
329 return (Mask(MoveWideImmediateMask) == MOVK_x) ||
330 (Mask(MoveWideImmediateMask) == MOVK_w);
333 bool IsMovn() const {
334 return (Mask(MoveWideImmediateMask) == MOVN_x) ||
335 (Mask(MoveWideImmediateMask) == MOVN_w);
339 // A marking nop is an instruction
341 // which is encoded as
342 // orr r<n>, xzr, r<n>
343 return (Mask(LogicalShiftedMask) == ORR_x) &&
// Find the PC offset encoded in this instruction. 'this' may be a branch or
// a PC-relative addressing instruction.
// The offset returned is unscaled.
ptrdiff_t ImmPCOffset();

// Find the target of this instruction. 'this' may be a branch or a
// PC-relative addressing instruction.
Instruction* ImmPCOffsetTarget();

// True if 'offset' fits in the immediate field of the given branch type.
static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset);
// True if 'target' is reachable from here with this branch's immediate.
bool IsTargetInImmPCOffsetRange(Instruction* target);
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
void SetImmPCOffsetTarget(Instruction* target);
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(Instruction* source);
365 uint8_t* LiteralAddress() {
366 int offset = ImmLLiteral() << kLiteralEntrySizeLog2;
367 return reinterpret_cast<uint8_t*>(this) + offset;
370 enum CheckAlignment { NO_CHECK, CHECK_ALIGNMENT };
372 V8_INLINE Instruction* InstructionAtOffset(
374 CheckAlignment check = CHECK_ALIGNMENT) {
375 Address addr = reinterpret_cast<Address>(this) + offset;
376 // The FUZZ_disasm test relies on no check being done.
377 ASSERT(check == NO_CHECK || IsAddressAligned(addr, kInstructionSize));
381 template<typename T> V8_INLINE static Instruction* Cast(T src) {
382 return reinterpret_cast<Instruction*>(src);
385 V8_INLINE ptrdiff_t DistanceTo(Instruction* target) {
386 return reinterpret_cast<Address>(target) - reinterpret_cast<Address>(this);
// Out-of-line patch helpers for the two instruction kinds handled by
// SetImmPCOffsetTarget() (PC-relative addressing and branches).
void SetPCRelImmTarget(Instruction* target);
void SetBranchImmTarget(Instruction* target);
395 // Where Instruction looks at instructions generated by the Assembler,
396 // InstructionSequence looks at instructions sequences generated by the
398 class InstructionSequence : public Instruction {
400 static InstructionSequence* At(Address address) {
401 return reinterpret_cast<InstructionSequence*>(address);
404 // Sequences generated by MacroAssembler::InlineData().
405 bool IsInlineData() const;
406 uint64_t InlineData() const;
// Simulator/Debugger debug instructions ---------------------------------------
// Each debug marker is represented by a HLT instruction. The immediate comment
// field in the instruction is used to identify the type of debug marker. Each
// marker encodes arguments in a different way, as described below.

// Indicate to the Debugger that the instruction is a redirected call.
const Instr kImmExceptionIsRedirectedCall = 0xca11;

// Represent unreachable code. This is used as a guard in parts of the code that
// should not be reachable, such as in data encoded inline in the instructions.
const Instr kImmExceptionIsUnreachable = 0xdebf;

// A pseudo 'printf' instruction. The arguments will be passed to the platform
// printf handler.
const Instr kImmExceptionIsPrintf = 0xdeb1;
// Parameters are stored in ARM64 registers as if the printf pseudo-instruction
// was a call to the real printf method:
//
//   x0: The format string, then either of:
//     x1-x7: Optional arguments.
//     d0-d7: Optional arguments.
//
// Floating-point and integer arguments are passed in separate sets of
// registers in AAPCS64 (even for varargs functions), so it is not possible to
// determine the type of location of each arguments without some information
// about the values that were passed in. This information could be retrieved
// from the printf format string, but the format string is not trivial to
// parse so we encode the relevant information with the HLT instruction.
// - Type
//   Either kRegister or kFPRegister, but stored as a uint32_t because there's
//   no way to guarantee the size of the CPURegister::RegisterType enum.
const unsigned kPrintfTypeOffset = 1 * kInstructionSize;
const unsigned kPrintfLength = 2 * kInstructionSize;
// A pseudo 'debug' instruction.
const Instr kImmExceptionIsDebug = 0xdeb0;
// Parameters are inlined in the code after a debug pseudo-instruction:
// - Debug code.
// - Debug parameters.
// - Debug message string. This is a NULL-terminated ASCII string, padded to
//   kInstructionSize so that subsequent instructions are correctly aligned.
// - A kImmExceptionIsUnreachable marker, to catch accidental execution of the
//   debug message data.
const unsigned kDebugCodeOffset = 1 * kInstructionSize;
const unsigned kDebugParamsOffset = 2 * kInstructionSize;
const unsigned kDebugMessageOffset = 3 * kInstructionSize;
// Debug parameter usage:
// Used without a TRACE_ option, the Debugger will print the arguments only
// once. Otherwise TRACE_ENABLE and TRACE_DISABLE will enable or disable tracing
// before every instruction for the specified LOG_ parameters.
//
// TRACE_OVERRIDE enables the specified LOG_ parameters, and disabled any
// others that were not specified.
//
// For example:
//
// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_FP_REGS);
// will print the registers and fp registers only once.
//
// __ debug("trace disasm", 1, TRACE_ENABLE | LOG_DISASM);
// starts disassembling the code.
//
// __ debug("trace rets", 2, TRACE_ENABLE | LOG_REGS);
// adds the general purpose registers to the trace.
//
// __ debug("stop regs", 3, TRACE_DISABLE | LOG_REGS);
// stops tracing the registers.
const unsigned kDebuggerTracingDirectivesMask = 3 << 6;
// Parameter flags for the 'debug' pseudo-instruction. Bits 0-5 select what to
// log; bits 6-7 (kDebuggerTracingDirectivesMask) carry a tracing directive.
// The first two members and the closing '};' were dropped by extraction and
// are restored from upstream V8.
enum DebugParameters {
  NO_PARAM = 0,
  BREAK = 1 << 0,
  LOG_DISASM = 1 << 1,  // Use only with TRACE. Disassemble the code.
  LOG_REGS = 1 << 2,  // Log general purpose registers.
  LOG_FP_REGS = 1 << 3,  // Log floating-point registers.
  LOG_SYS_REGS = 1 << 4,  // Log the status flags.
  LOG_WRITE = 1 << 5,  // Log any memory write.

  LOG_STATE = LOG_REGS | LOG_FP_REGS | LOG_SYS_REGS,
  LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE,

  // Trace control directives (see kDebuggerTracingDirectivesMask).
  TRACE_ENABLE = 1 << 6,
  TRACE_DISABLE = 2 << 6,
  TRACE_OVERRIDE = 3 << 6
};
498 } } // namespace v8::internal
501 #endif // V8_ARM64_INSTRUCTIONS_ARM64_H_