// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
#define V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_

namespace v8 {
namespace internal {
namespace compiler {

// ARM64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction. A sketch of how the V(...)
// entries are consumed follows the list below.
#define TARGET_ARCH_OPCODE_LIST(V) \
  V(Arm64Add)                      \
  V(Arm64Add32)                    \
  V(Arm64And)                      \
  V(Arm64And32)                    \
  V(Arm64Bic)                      \
  V(Arm64Bic32)                    \
  V(Arm64Clz32)                    \
  V(Arm64Cmp)                      \
  V(Arm64Cmp32)                    \
  V(Arm64Cmn)                      \
  V(Arm64Cmn32)                    \
  V(Arm64Tst)                      \
  V(Arm64Tst32)                    \
  V(Arm64Or)                       \
  V(Arm64Or32)                     \
  V(Arm64Orn)                      \
  V(Arm64Orn32)                    \
  V(Arm64Eor)                      \
  V(Arm64Eor32)                    \
  V(Arm64Eon)                      \
  V(Arm64Eon32)                    \
  V(Arm64Sub)                      \
  V(Arm64Sub32)                    \
  V(Arm64Mul)                      \
  V(Arm64Mul32)                    \
  V(Arm64Smull)                    \
  V(Arm64Umull)                    \
  V(Arm64Madd)                     \
  V(Arm64Madd32)                   \
  V(Arm64Msub)                     \
  V(Arm64Msub32)                   \
  V(Arm64Mneg)                     \
  V(Arm64Mneg32)                   \
  V(Arm64Idiv)                     \
  V(Arm64Idiv32)                   \
  V(Arm64Udiv)                     \
  V(Arm64Udiv32)                   \
  V(Arm64Imod)                     \
  V(Arm64Imod32)                   \
  V(Arm64Umod)                     \
  V(Arm64Umod32)                   \
  V(Arm64Not)                      \
  V(Arm64Not32)                    \
  V(Arm64Neg)                      \
  V(Arm64Neg32)                    \
  V(Arm64Lsl)                      \
  V(Arm64Lsl32)                    \
  V(Arm64Lsr)                      \
  V(Arm64Lsr32)                    \
  V(Arm64Asr)                      \
  V(Arm64Asr32)                    \
  V(Arm64Ror)                      \
  V(Arm64Ror32)                    \
  V(Arm64Mov32)                    \
  V(Arm64Sxtb32)                   \
  V(Arm64Sxth32)                   \
  V(Arm64Sxtw)                     \
  V(Arm64Sbfx32)                   \
  V(Arm64Ubfx)                     \
  V(Arm64Ubfx32)                   \
  V(Arm64Ubfiz32)                  \
  V(Arm64Bfi)                      \
  V(Arm64TestAndBranch32)          \
  V(Arm64TestAndBranch)            \
  V(Arm64CompareAndBranch32)       \
  V(Arm64Claim)                    \
  V(Arm64Poke)                     \
  V(Arm64PokePair)                 \
  V(Arm64Float32Cmp)               \
  V(Arm64Float32Add)               \
  V(Arm64Float32Sub)               \
  V(Arm64Float32Mul)               \
  V(Arm64Float32Div)               \
  V(Arm64Float32Max)               \
  V(Arm64Float32Min)               \
  V(Arm64Float32Abs)               \
  V(Arm64Float32Sqrt)              \
  V(Arm64Float64Cmp)               \
  V(Arm64Float64Add)               \
  V(Arm64Float64Sub)               \
  V(Arm64Float64Mul)               \
  V(Arm64Float64Div)               \
  V(Arm64Float64Mod)               \
  V(Arm64Float64Max)               \
  V(Arm64Float64Min)               \
  V(Arm64Float64Abs)               \
  V(Arm64Float64Neg)               \
  V(Arm64Float64Sqrt)              \
  V(Arm64Float64RoundDown)         \
  V(Arm64Float64RoundTiesAway)     \
  V(Arm64Float64RoundTruncate)     \
  V(Arm64Float64RoundUp)           \
  V(Arm64Float32ToFloat64)         \
  V(Arm64Float64ToFloat32)         \
  V(Arm64Float64ToInt32)           \
  V(Arm64Float64ToUint32)          \
  V(Arm64Int32ToFloat64)           \
  V(Arm64Uint32ToFloat64)          \
  V(Arm64Float64ExtractLowWord32)  \
  V(Arm64Float64ExtractHighWord32) \
  V(Arm64Float64InsertLowWord32)   \
  V(Arm64Float64InsertHighWord32)  \
  V(Arm64Float64MoveU64)           \
  V(Arm64U64MoveFloat64)           \
  V(Arm64LdrS)                     \
  V(Arm64StrS)                     \
  V(Arm64LdrD)                     \
  V(Arm64StrD)                     \
  V(Arm64Ldrb)                     \
  V(Arm64Ldrsb)                    \
  V(Arm64Strb)                     \
  V(Arm64Ldrh)                     \
  V(Arm64Ldrsh)                    \
  V(Arm64Strh)                     \
  V(Arm64LdrW)                     \
  V(Arm64StrW)                     \
  V(Arm64Ldr)                      \
  V(Arm64Str)                      \
  V(Arm64StoreWriteBarrier)


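// A minimal sketch (an illustration, not part of this header) of how an
// X-macro list like TARGET_ARCH_OPCODE_LIST is typically consumed: the
// including code defines V, expands the list, and undefines V again, so
// each V(Name) entry becomes one "k"-prefixed enum constant. The enum and
// constant names here are assumptions for illustration only; the real
// expansion lives in the shared, platform-independent instruction-codes.h.
//
//   #define DECLARE_ARCH_OPCODE(Name) k##Name,
//   enum ExampleArchOpcode {
//     TARGET_ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE)  // kArm64Add, ...
//     kExampleLastOpcode
//   };
//   #undef DECLARE_ARCH_OPCODE
//
// The same list can be expanded a second time with a different V, for
// example to build a parallel table of printable opcode names.
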
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call
// (a decoding sketch follows the list below).
//
// We use the following local notation for addressing modes:
//
// R = register
// O = register or stack slot
// D = double register
// I = immediate (handle, external, int32)
// MRI = [register + immediate]
// MRR = [register + register]
#define TARGET_ADDRESSING_MODE_LIST(V)                          \
  V(MRI)              /* [%r0 + K] */                           \
  V(MRR)              /* [%r0 + %r1] */                         \
  V(Operand2_R_LSL_I) /* %r0 LSL K */                           \
  V(Operand2_R_LSR_I) /* %r0 LSR K */                           \
  V(Operand2_R_ASR_I) /* %r0 ASR K */                           \
  V(Operand2_R_ROR_I) /* %r0 ROR K */                           \
  V(Operand2_R_UXTB)  /* %r0 UXTB (unsigned extend byte) */     \
  V(Operand2_R_UXTH)  /* %r0 UXTH (unsigned extend halfword) */ \
  V(Operand2_R_SXTB)  /* %r0 SXTB (signed extend byte) */       \
  V(Operand2_R_SXTH)  /* %r0 SXTH (signed extend halfword) */

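// A hedged sketch (not code from this file) of how the code generator
// typically dispatches on the encoded addressing mode: the mode is decoded
// out of the InstructionCode bit field and switched over to pick the right
// assembler operand. The kMode_* constants and AddressingModeField follow
// the naming convention of the shared instruction-codes.h; InputRegister,
// InputInt32, and MemOperand are assumed helpers of the ARM64 backend.
//
//   switch (AddressingModeField::decode(instr->opcode())) {
//     case kMode_MRI:  // [%r0 + K]: base register plus immediate offset.
//       return MemOperand(InputRegister(0), InputInt32(1));
//     case kMode_MRR:  // [%r0 + %r1]: base register plus index register.
//       return MemOperand(InputRegister(0), InputRegister(1));
//     default:         // Operand2_* modes do not form memory operands.
//       UNREACHABLE();
//   }
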
}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_