1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2024 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
30 #include "libiberty.h"
32 #include "aarch64-opc.h"
/* Debug-trace enable flag.  NOTE(review): the opening #ifdef DEBUG_AARCH64
   that pairs with the #endif below is not visible in this excerpt.  */
35 int debug_dump = false;
36 #endif /* DEBUG_AARCH64 */
/* Lookup tables mapping encoded field values to their assembler mnemonic
   strings.  NOTE(review): the initializer entries of these arrays are not
   visible in this excerpt of the listing.  */
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array[32] = {
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array[16] = {
102 /* The enumeration strings associated with each value of a 6-bit RPRFM
104 const char *const aarch64_rprfmop_array[64] = {
113 /* Vector length multiples for a predicate-as-counter operand. Used in things
114 like AARCH64_OPND_SME_VLxN_10. */
115 const char *const aarch64_sme_vlxn_array[2] = {
120 /* Helper functions to determine which operand to be used to encode/decode
121 the size:Q fields for AdvSIMD instructions. */
/* Return non-zero if QUALIFIER denotes an AdvSIMD vector arrangement,
   i.e. lies in the contiguous range V_8B .. V_1Q of the qualifier enum.
   NOTE(review): the return type and braces are elided in this excerpt.  */
124 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
126 return (qualifier >= AARCH64_OPND_QLF_V_8B
127 && qualifier <= AARCH64_OPND_QLF_V_1Q);
/* Return non-zero if QUALIFIER denotes a scalar FP/SIMD register size,
   i.e. lies in the contiguous range S_B .. S_Q of the qualifier enum.  */
131 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
133 return (qualifier >= AARCH64_OPND_QLF_S_B
134 && qualifier <= AARCH64_OPND_QLF_S_Q);
143 DP_VECTOR_ACROSS_LANES,
/* For each data pattern, the index of the operand whose qualifier carries
   the size:Q information; indexed by enum data_pattern.  NOTE(review): the
   earlier enumerators of data_pattern are not visible in this excerpt.  */
146 static const char significant_operand_index [] =
148 0, /* DP_UNKNOWN, by default using operand 0. */
149 0, /* DP_VECTOR_3SAME */
150 1, /* DP_VECTOR_LONG */
151 2, /* DP_VECTOR_WIDE */
152 1, /* DP_VECTOR_ACROSS_LANES */
155 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
157 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
158 corresponds to one of a sequence of operands. */
160 static enum data_pattern
161 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
/* Classify by the first operand: vector arrangement vs scalar FP.  */
163 if (vector_qualifier_p (qualifiers[0]))
165 /* e.g. v.4s, v.4s, v.4s
166 or v.4h, v.4h, v.h[3]. */
/* All three operands are vectors of the same element size.  */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2])
169 && (aarch64_get_qualifier_esize (qualifiers[0])
170 == aarch64_get_qualifier_esize (qualifiers[1]))
171 && (aarch64_get_qualifier_esize (qualifiers[0])
172 == aarch64_get_qualifier_esize (qualifiers[2])))
173 return DP_VECTOR_3SAME;
174 /* e.g. v.8h, v.8b, v.8b.
175 or v.4s, v.4h, v.h[2].
/* Destination element size is double the source's (long operation).  */
177 if (vector_qualifier_p (qualifiers[1])
178 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
179 && (aarch64_get_qualifier_esize (qualifiers[0])
180 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
181 return DP_VECTOR_LONG;
182 /* e.g. v.8h, v.8h, v.8b. */
/* First two operands match; third is half the element size (wide op).  */
183 if (qualifiers[0] == qualifiers[1]
184 && vector_qualifier_p (qualifiers[2])
185 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
186 && (aarch64_get_qualifier_esize (qualifiers[0])
187 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
188 && (aarch64_get_qualifier_esize (qualifiers[0])
189 == aarch64_get_qualifier_esize (qualifiers[1])))
190 return DP_VECTOR_WIDE;
192 else if (fp_qualifier_p (qualifiers[0]))
194 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
/* Scalar destination fed by a vector source: across-lanes reduction.
   NOTE(review): the function's fallback return path is elided here.  */
195 if (vector_qualifier_p (qualifiers[1])
196 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
197 return DP_VECTOR_ACROSS_LANES;
203 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
204 the AdvSIMD instructions. */
205 /* N.B. it is possible to do some optimization that doesn't call
206 get_data_pattern each time when we need to select an operand. We can
207 either cache the calculated result or statically generate the data,
208 however, it is not obvious that the optimization will bring significant
/* Returns the operand index looked up from significant_operand_index,
   keyed by the data pattern of the opcode's first qualifier sequence.  */
212 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
215 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
218 /* Instruction bit-fields.
219 + Keep synced with 'enum aarch64_field_kind'. */
/* Each entry is { least-significant bit position, width in bits } of one
   named instruction field, as the per-entry comments indicate (e.g. CRm
   occupies bits [11:8]).  The table is indexed by enum aarch64_field_kind,
   so entry order must match that enum exactly.  */
220 const aarch64_field fields[] =
223 { 8, 4 }, /* CRm: in the system instructions. */
224 { 10, 2 }, /* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>. */
225 { 12, 4 }, /* CRn: in the system instructions. */
226 { 10, 8 }, /* CSSC_imm8. */
227 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
228 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
229 { 0, 5 }, /* LSE128_Rt: Shared input+output operand register. */
230 { 16, 5 }, /* LSE128_Rt2: Shared input+output operand register 2. */
231 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
232 { 22, 1 }, /* N: in logical (immediate) instructions. */
233 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
234 { 10, 5 }, /* Ra: in fp instructions. */
235 { 0, 5 }, /* Rd: in many integer instructions. */
236 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
237 { 5, 5 }, /* Rn: in many integer instructions. */
238 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
239 { 0, 5 }, /* Rt: in load/store instructions. */
240 { 10, 5 }, /* Rt2: in load/store pair instructions. */
241 { 12, 1 }, /* S: in load/store reg offset instructions. */
242 { 12, 2 }, /* SM3_imm2: Indexed element SM3 2 bits index immediate. */
243 { 1, 3 }, /* SME_Pdx2: predicate register, multiple of 2, [3:1]. */
244 { 13, 3 }, /* SME_Pm: second source scalable predicate register P0-P7. */
245 { 0, 3 }, /* SME_PNd3: PN0-PN7, bits [2:0]. */
246 { 5, 3 }, /* SME_PNn3: PN0-PN7, bits [7:5]. */
247 { 16, 1 }, /* SME_Q: Q class bit, bit 16. */
248 { 16, 2 }, /* SME_Rm: index base register W12-W15 [17:16]. */
249 { 13, 2 }, /* SME_Rv: vector select register W12-W15, bits [14:13]. */
250 { 15, 1 }, /* SME_V: (horizontal / vertical tiles), bit 15. */
251 { 10, 1 }, /* SME_VL_10: VLx2 or VLx4, bit [10]. */
252 { 13, 1 }, /* SME_VL_13: VLx2 or VLx4, bit [13]. */
253 { 0, 2 }, /* SME_ZAda_2b: tile ZA0-ZA3. */
254 { 0, 3 }, /* SME_ZAda_3b: tile ZA0-ZA7. */
255 { 1, 4 }, /* SME_Zdn2: Z0-Z31, multiple of 2, bits [4:1]. */
256 { 2, 3 }, /* SME_Zdn4: Z0-Z31, multiple of 4, bits [4:2]. */
257 { 16, 4 }, /* SME_Zm: Z0-Z15, bits [19:16]. */
258 { 17, 4 }, /* SME_Zm2: Z0-Z31, multiple of 2, bits [20:17]. */
259 { 18, 3 }, /* SME_Zm4: Z0-Z31, multiple of 4, bits [20:18]. */
260 { 6, 4 }, /* SME_Zn2: Z0-Z31, multiple of 2, bits [9:6]. */
261 { 7, 3 }, /* SME_Zn4: Z0-Z31, multiple of 4, bits [9:7]. */
262 { 4, 1 }, /* SME_ZtT: upper bit of Zt, bit [4]. */
263 { 0, 3 }, /* SME_Zt3: lower 3 bits of Zt, bits [2:0]. */
264 { 0, 2 }, /* SME_Zt2: lower 2 bits of Zt, bits [1:0]. */
265 { 23, 1 }, /* SME_i1: immediate field, bit 23. */
266 { 12, 2 }, /* SME_size_12: bits [13:12]. */
267 { 22, 2 }, /* SME_size_22: size<1>, size<0> class field, [23:22]. */
268 { 23, 1 }, /* SME_sz_23: bit [23]. */
269 { 22, 1 }, /* SME_tszh: immediate and qualifier field, bit 22. */
270 { 18, 3 }, /* SME_tszl: immediate and qualifier field, bits [20:18]. */
271 { 0, 8 }, /* SME_zero_mask: list of up to 8 tile names separated by commas [7:0]. */
272 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
273 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
274 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
275 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
276 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
277 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
278 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
279 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
280 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
281 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
282 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
283 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
284 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
285 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
286 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
287 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
288 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
289 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
290 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
291 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
292 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
293 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
294 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
295 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
296 { 5, 1 }, /* SVE_i1: single-bit immediate. */
297 { 20, 1 }, /* SVE_i2h: high bit of 2bit immediate, bits. */
298 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
299 { 19, 2 }, /* SVE_i3h2: two high bits of 3bit immediate, bits [20,19]. */
300 { 11, 1 }, /* SVE_i3l: low bit of 3-bit immediate. */
301 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
302 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
303 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
304 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
305 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
306 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
307 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
308 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
309 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
310 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
311 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
312 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
313 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
314 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
315 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
316 { 10, 1 }, /* SVE_rot3: 1-bit rotation amount at bit 10. */
317 { 17, 2 }, /* SVE_size: 2-bit element size, bits [18,17]. */
318 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
319 { 30, 1 }, /* SVE_sz2: 1-bit element size select. */
320 { 16, 4 }, /* SVE_tsz: triangular size select. */
321 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
322 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
323 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
324 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
325 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
326 { 22, 1 }, /* S_imm10: in LDRAA and LDRAB instructions. */
327 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
328 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
329 { 19, 5 }, /* b40: in the test bit and branch instructions. */
330 { 31, 1 }, /* b5: in the test bit and branch instructions. */
331 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
332 { 12, 4 }, /* cond: condition flags as a source operand. */
333 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
334 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
335 { 21, 2 }, /* hw: in move wide constant instructions. */
336 { 0, 1 }, /* imm1_0: general immediate in bits [0]. */
337 { 2, 1 }, /* imm1_2: general immediate in bits [2]. */
338 { 8, 1 }, /* imm1_8: general immediate in bits [8]. */
339 { 10, 1 }, /* imm1_10: general immediate in bits [10]. */
340 { 15, 1 }, /* imm1_15: general immediate in bits [15]. */
341 { 16, 1 }, /* imm1_16: general immediate in bits [16]. */
342 { 0, 2 }, /* imm2_0: general immediate in bits [1:0]. */
343 { 1, 2 }, /* imm2_1: general immediate in bits [2:1]. */
344 { 8, 2 }, /* imm2_8: general immediate in bits [9:8]. */
345 { 10, 2 }, /* imm2_10: 2-bit immediate, bits [11:10] */
346 { 12, 2 }, /* imm2_12: 2-bit immediate, bits [13:12] */
347 { 15, 2 }, /* imm2_15: 2-bit immediate, bits [16:15] */
348 { 16, 2 }, /* imm2_16: 2-bit immediate, bits [17:16] */
349 { 19, 2 }, /* imm2_19: 2-bit immediate, bits [20:19] */
350 { 0, 3 }, /* imm3_0: general immediate in bits [2:0]. */
351 { 5, 3 }, /* imm3_5: general immediate in bits [7:5]. */
352 { 10, 3 }, /* imm3_10: in add/sub extended reg instructions. */
353 { 12, 3 }, /* imm3_12: general immediate in bits [14:12]. */
354 { 14, 3 }, /* imm3_14: general immediate in bits [16:14]. */
355 { 15, 3 }, /* imm3_15: general immediate in bits [17:15]. */
356 { 0, 4 }, /* imm4_0: in rmif instructions. */
357 { 5, 4 }, /* imm4_5: in SME instructions. */
358 { 10, 4 }, /* imm4_10: in adddg/subg instructions. */
359 { 11, 4 }, /* imm4_11: in advsimd ext and advsimd ins instructions. */
360 { 14, 4 }, /* imm4_14: general immediate in bits [17:14]. */
361 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
362 { 10, 6 }, /* imm6_10: in add/sub reg shifted instructions. */
363 { 15, 6 }, /* imm6_15: in rmif instructions. */
364 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
365 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
366 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
367 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
368 { 5, 14 }, /* imm14: in test bit and branch instructions. */
369 { 0, 16 }, /* imm16_0: in udf instruction. */
370 { 5, 16 }, /* imm16_5: in exception instructions. */
371 { 5, 19 }, /* imm19: e.g. in CBZ. */
372 { 0, 26 }, /* imm26: in unconditional branch instructions. */
373 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
374 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
375 { 5, 19 }, /* immhi: e.g. in ADRP. */
376 { 29, 2 }, /* immlo: e.g. in ADRP. */
377 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
378 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
379 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
380 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
381 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
382 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
383 { 30, 1 }, /* lse_sz: in LSE extension atomic instructions. */
384 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
385 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
386 { 19, 2 }, /* op0: in the system instructions. */
387 { 16, 3 }, /* op1: in the system instructions. */
388 { 5, 3 }, /* op2: in the system instructions. */
389 { 22, 2 }, /* opc: in load/store reg offset instructions. */
390 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
391 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
392 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
393 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
394 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
395 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
396 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
397 { 31, 1 }, /* sf: in integer data processing instructions. */
398 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
399 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
400 { 22, 1 }, /* sz: 1-bit element size select. */
401 { 22, 2 }, /* type: floating point type field in fp data inst. */
402 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
403 { 5, 3 }, /* off3: immediate offset used to calculate slice number in a
405 { 5, 2 }, /* off2: immediate offset used to calculate slice number in
407 { 7, 1 }, /* ZAn_1: name of the 1bit encoded ZA tile. */
408 { 5, 1 }, /* ol: immediate offset used to calculate slice number in a ZA
410 { 6, 2 }, /* ZAn_2: name of the 2bit encoded ZA tile. */
411 { 5, 3 }, /* ZAn_3: name of the 3bit encoded ZA tile. */
412 { 6, 1 }, /* ZAn: name of the bit encoded ZA tile. */
413 { 12, 4 }, /* opc2: in rcpc3 ld/st inst deciding the pre/post-index. */
414 { 30, 2 }, /* rcpc3_size: in rcpc3 ld/st, field controls Rt/Rt2 width. */
/* Accessors into the aarch64_operands[] description table, keyed by the
   operand code TYPE.  */
417 enum aarch64_operand_class
418 aarch64_get_operand_class (enum aarch64_opnd type)
420 return aarch64_operands[type].op_class;
/* Return the short name string of operand TYPE.  */
424 aarch64_get_operand_name (enum aarch64_opnd type)
426 return aarch64_operands[type].name;
429 /* Get operand description string.
430 This is usually for the diagnosis purpose. */
432 aarch64_get_operand_desc (enum aarch64_opnd type)
434 return aarch64_operands[type].desc;
437 /* Table of all conditional affixes. */
/* Each entry pairs the accepted spellings with the 4-bit condition code
   value.  NOTE(review): entries for values 0x6-0x7 and 0xc-0xf are not
   visible in this excerpt.  */
438 const aarch64_cond aarch64_conds[16] =
440 {{"eq", "none"}, 0x0},
441 {{"ne", "any"}, 0x1},
442 {{"cs", "hs", "nlast"}, 0x2},
443 {{"cc", "lo", "ul", "last"}, 0x3},
444 {{"mi", "first"}, 0x4},
445 {{"pl", "nfrst"}, 0x5},
448 {{"hi", "pmore"}, 0x8},
449 {{"ls", "plast"}, 0x9},
450 {{"ge", "tcont"}, 0xa},
451 {{"lt", "tstop"}, 0xb},
/* Look up the aarch64_cond entry for the 4-bit condition VALUE.  */
459 get_cond_from_value (aarch64_insn value)
462 return &aarch64_conds[(unsigned int) value];
/* Inverted condition: flipping the low bit of the value pairs each
   condition with its logical inverse (eq/ne, cs/cc, ...).  */
466 get_inverted_cond (const aarch64_cond *cond)
468 return &aarch64_conds[cond->value ^ 0x1];
471 /* Table describing the operand extension/shifting operators; indexed by
472 enum aarch64_modifier_kind.
474 The value column provides the most common values for encoding modifiers,
475 which enables table-driven encoding/decoding for the modifiers. */
/* NOTE(review): the table's initializer entries are not visible in this
   excerpt.  */
476 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
/* Recover the modifier kind from a pointer into the table above, by
   pointer subtraction.  */
497 enum aarch64_modifier_kind
498 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
500 return desc - aarch64_operand_modifiers;
/* Return the encoding value associated with modifier KIND.  */
504 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
506 return aarch64_operand_modifiers[kind].value;
/* Map an encoded VALUE back to a modifier kind.  NOTE(review): the branch
   conditions selecting between the two return paths are elided here.  */
509 enum aarch64_modifier_kind
510 aarch64_get_operand_modifier_from_value (aarch64_insn value,
514 return AARCH64_MOD_UXTB + value;
516 return AARCH64_MOD_LSL - value;
/* True if KIND is one of the register-extend operators (UXTB..SXTX).  */
520 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
522 return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX;
/* True if KIND is one of the shift operators (ROR..LSL).  */
526 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
528 return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL;
/* Barrier (DSB/DMB/ISB) option names keyed by their 4-bit CRm encodings.
   NOTE(review): the initializer of this first table is not visible in
   this excerpt.  */
531 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
/* DSB nXS barrier options; the value column is the full CRm immediate
   carrying CRm<3:2> as noted per entry.  */
551 const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options[4] =
552 { /* CRm<3:2> #imm */
553 { "oshnxs", 16 }, /* 00 16 */
554 { "nshnxs", 20 }, /* 01 20 */
555 { "ishnxs", 24 }, /* 10 24 */
556 { "synxs", 28 }, /* 11 28 */
559 /* Table describing the operands supported by the aliases of the HINT
562 The name column is the operand that is accepted for the alias. The value
563 column is the hint number of the alias. The list of operands is terminated
564 by NULL in the name column. */
566 const struct aarch64_name_value_pair aarch64_hint_options[] =
568 /* BTI. This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET. */
/* The " " name with HINT_OPD_F_NOPRINT marks the operand-less BTI form,
   which is accepted but never printed.  */
569 { " ", HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
570 { "csync", HINT_OPD_CSYNC }, /* PSB CSYNC. */
571 { "dsync", HINT_OPD_DSYNC }, /* GCSB DSYNC. */
572 { "c", HINT_OPD_C }, /* BTI C. */
573 { "j", HINT_OPD_J }, /* BTI J. */
574 { "jc", HINT_OPD_JC }, /* BTI JC. */
575 { NULL, HINT_OPD_NULL },
578 /* op -> op: load = 0 instruction = 1 store = 2
580 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
/* Pack a PRFM operation value: type in bits [4:3], (level - 1) in bits
   [2:1], temporal hint in bit [0].  */
581 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
/* PRFM prefetch operation names keyed by their 5-bit encodings.
   NOTE(review): the trailing (reserved/numeric) entries and the closing
   of this table are not visible in this excerpt.  */
582 const struct aarch64_name_value_pair aarch64_prfops[32] =
584 { "pldl1keep", B(0, 1, 0) },
585 { "pldl1strm", B(0, 1, 1) },
586 { "pldl2keep", B(0, 2, 0) },
587 { "pldl2strm", B(0, 2, 1) },
588 { "pldl3keep", B(0, 3, 0) },
589 { "pldl3strm", B(0, 3, 1) },
590 { "pldslckeep", B(0, 4, 0) },
591 { "pldslcstrm", B(0, 4, 1) },
592 { "plil1keep", B(1, 1, 0) },
593 { "plil1strm", B(1, 1, 1) },
594 { "plil2keep", B(1, 2, 0) },
595 { "plil2strm", B(1, 2, 1) },
596 { "plil3keep", B(1, 3, 0) },
597 { "plil3strm", B(1, 3, 1) },
598 { "plislckeep", B(1, 4, 0) },
599 { "plislcstrm", B(1, 4, 1) },
600 { "pstl1keep", B(2, 1, 0) },
601 { "pstl1strm", B(2, 1, 1) },
602 { "pstl2keep", B(2, 2, 0) },
603 { "pstl2strm", B(2, 2, 1) },
604 { "pstl3keep", B(2, 3, 0) },
605 { "pstl3strm", B(2, 3, 1) },
606 { "pstslckeep", B(2, 4, 0) },
607 { "pstslcstrm", B(2, 4, 1) },
619 /* Utilities on value constraint. */
/* Return 1 iff VALUE lies in the inclusive range [LOW, HIGH].  */
622 value_in_range_p (int64_t value, int low, int high)
624 return (value >= low && value <= high) ? 1 : 0;
627 /* Return true if VALUE is a multiple of ALIGN. */
629 value_aligned_p (int64_t value, int align)
631 return (value % align) == 0;
634 /* A signed value fits in a field. */
636 value_fit_signed_field_p (int64_t value, unsigned width)
/* Guard the shift: shifting a 64-bit value by >= 64 would be undefined,
   and any value fits in a field at least as wide as the type.  */
639 if (width < sizeof (value) * 8)
641 int64_t lim = (uint64_t) 1 << (width - 1);
642 if (value >= -lim && value < lim)
648 /* An unsigned value fits in a field. */
650 value_fit_unsigned_field_p (int64_t value, unsigned width)
653 if (width < sizeof (value) * 8)
655 int64_t lim = (uint64_t) 1 << width;
656 if (value >= 0 && value < lim)
662 /* Return 1 if OPERAND is SP or WSP. */
/* Register number 31 means SP only for operands that may take the stack
   pointer; otherwise it means the zero register (see below).  */
664 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
666 return ((aarch64_get_operand_class (operand->type)
667 == AARCH64_OPND_CLASS_INT_REG)
668 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
669 && operand->reg.regno == 31);
672 /* Return 1 if OPERAND is XZR or WZR. */
674 aarch64_zero_register_p (const aarch64_opnd_info *operand)
676 return ((aarch64_get_operand_class (operand->type)
677 == AARCH64_OPND_CLASS_INT_REG)
678 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
679 && operand->reg.regno == 31);
682 /* Return true if the operand *OPERAND that has the operand code
683 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
684 qualified by the qualifier TARGET. */
687 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
688 aarch64_opnd_qualifier_t target)
/* A W/X register numbered 31 is compatible with the WSP/SP qualifier
   when the operand may be a stack pointer, and vice versa.
   NOTE(review): the per-case return statements are elided in this
   excerpt.  */
690 switch (operand->qualifier)
692 case AARCH64_OPND_QLF_W:
693 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
696 case AARCH64_OPND_QLF_X:
697 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
700 case AARCH64_OPND_QLF_WSP:
701 if (target == AARCH64_OPND_QLF_W
702 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
705 case AARCH64_OPND_QLF_SP:
706 if (target == AARCH64_OPND_QLF_X
707 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
717 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
718 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
720 Return NIL if more than one expected qualifiers are found. */
722 aarch64_opnd_qualifier_t
723 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
725 const aarch64_opnd_qualifier_t known_qlf,
732 When the known qualifier is NIL, we have to assume that there is only
733 one qualifier sequence in the *QSEQ_LIST and return the corresponding
734 qualifier directly. One scenario is that for instruction
735 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
736 which has only one possible valid qualifier sequence
738 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
739 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
741 Because the qualifier NIL has dual roles in the qualifier sequence:
742 it can mean no qualifier for the operand, or the qualifier sequence is
743 not in use (when all qualifiers in the sequence are NILs), we have to
744 handle this special case here. */
745 if (known_qlf == AARCH64_OPND_NIL)
747 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
748 return qseq_list[0][idx];
/* Otherwise scan all sequences; remember the single sequence that matches
   KNOWN_QLF at KNOWN_IDX, and fail if it is not unique.  */
751 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
753 if (qseq_list[i][known_idx] == known_qlf)
756 /* More than one sequences are found to have KNOWN_QLF at
758 return AARCH64_OPND_NIL;
763 return qseq_list[saved_i][idx];
/* Kinds of operand qualifier data; the meaning of the three data fields
   in struct operand_qualifier_data depends on this kind.
   NOTE(review): the enumerators of this enum are not visible in this
   excerpt.  */
766 enum operand_qualifier_kind
774 /* Operand qualifier description. */
775 struct operand_qualifier_data
777 /* The usage of the three data fields depends on the qualifier kind. */
784 enum operand_qualifier_kind kind;
787 /* Indexed by the operand qualifier enumerators. */
788 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
790 {0, 0, 0, "NIL", OQK_NIL},
792 /* Operand variant qualifiers.
794 element size, number of elements and common value for encoding. */
796 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
797 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
798 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
799 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
801 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
802 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
803 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
804 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
805 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
806 {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
807 {4, 1, 0x0, "2h", OQK_OPD_VARIANT},
809 {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
810 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
811 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
812 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
813 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
814 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
815 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
816 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
817 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
818 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
819 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
821 {0, 0, 0, "z", OQK_OPD_VARIANT},
822 {0, 0, 0, "m", OQK_OPD_VARIANT},
824 /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc). */
825 {16, 0, 0, "tag", OQK_OPD_VARIANT},
827 /* Qualifiers constraining the value range.
829 Lower bound, higher bound, unused. */
831 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE},
832 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
833 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
834 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
835 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
836 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
837 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
839 /* Qualifiers for miscellaneous purpose.
841 unused, unused and unused. */
846 {0, 0, 0, "retrieving", 0},
/* True if QUALIFIER is an operand-variant qualifier (register/vector
   arrangement) rather than a value-range or miscellaneous one.  */
850 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
852 return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT;
/* True if QUALIFIER expresses a value-range constraint.  */
856 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
858 return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE;
/* Return the printable name of QUALIFIER.  */
862 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
864 return aarch64_opnd_qualifiers[qualifier].desc;
867 /* Given an operand qualifier, return the expected data element size
868 of a qualified operand. */
870 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
872 assert (operand_variant_qualifier_p (qualifier));
873 return aarch64_opnd_qualifiers[qualifier].data0;
/* Return the number of elements for variant QUALIFIER.  */
877 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
879 assert (operand_variant_qualifier_p (qualifier));
880 return aarch64_opnd_qualifiers[qualifier].data1;
/* Return the common encoding value for variant QUALIFIER.  */
884 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
886 assert (operand_variant_qualifier_p (qualifier));
887 return aarch64_opnd_qualifiers[qualifier].data2;
/* Lower bound of a value-range qualifier.  */
891 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
893 assert (qualifier_value_in_range_constraint_p (qualifier));
894 return aarch64_opnd_qualifiers[qualifier].data0;
/* Upper bound of a value-range qualifier.  */
898 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
900 assert (qualifier_value_in_range_constraint_p (qualifier));
901 return aarch64_opnd_qualifiers[qualifier].data1;
/* Debug-only helpers (compiled under DEBUG_AARCH64; see the #endif below).
   aarch64_verbose is a printf-style trace routine; its body is elided in
   this excerpt.  */
906 aarch64_verbose (const char *str, ...)
/* Print a comma-separated qualifier sequence to stdout.  */
917 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
921 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
922 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
/* Dump the operands' current qualifiers next to the candidate sequence
   they are being matched against.  */
927 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
928 const aarch64_opnd_qualifier_t *qualifier)
931 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
933 aarch64_verbose ("dump_match_qualifiers:");
934 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
935 curr[i] = opnd[i].qualifier;
936 dump_qualifier_sequence (curr);
937 aarch64_verbose ("against");
938 dump_qualifier_sequence (qualifier);
940 #endif /* DEBUG_AARCH64 */
942 /* This function checks if the given instruction INSN is a destructive
943 instruction based on the usage of the registers. It does not recognize
944 unary destructive instructions. */
946 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
949 const enum aarch64_opnd *opnds = opcode->operands;
/* No operands: cannot be destructive.  */
951 if (opnds[0] == AARCH64_OPND_NIL)
/* Destructive iff a later operand reuses the destination (operand 0).  */
954 while (opnds[++i] != AARCH64_OPND_NIL)
955 if (opnds[i] == opnds[0])
961 /* TODO improve this, we can have an extra field at the runtime to
962 store the number of operands rather than calculating it every time. */
/* Count operands up to the AARCH64_OPND_NIL terminator.  */
965 aarch64_num_of_operands (const aarch64_opcode *opcode)
968 const enum aarch64_opnd *opnds = opcode->operands;
969 while (opnds[i++] != AARCH64_OPND_NIL)
972 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
976 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
977 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
979 Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
980 This is always 0 if the function succeeds.
982 N.B. on the entry, it is very likely that only some operands in *INST
983 have had their qualifiers been established.
985 If STOP_AT is not -1, the function will only try to match
986 the qualifier sequence for operands before and including the operand
987 of index STOP_AT; and on success *RET will only be filled with the first
988 (STOP_AT+1) qualifiers.
990 A couple examples of the matching algorithm:
998 Apart from serving the main encoding routine, this can also be called
999 during or after the operand decoding. */
1002 aarch64_find_best_match (const aarch64_inst *inst,
1003 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
1004 int stop_at, aarch64_opnd_qualifier_t *ret,
1007 int i, num_opnds, invalid, min_invalid;
1008 const aarch64_opnd_qualifier_t *qualifiers;
1010 num_opnds = aarch64_num_of_operands (inst->opcode);
/* An opcode with no operands trivially matches.  */
1013 DEBUG_TRACE ("SUCCEED: no operand");
/* Clamp STOP_AT to the last operand index.  */
1018 if (stop_at < 0 || stop_at >= num_opnds)
1019 stop_at = num_opnds - 1;
1021 /* For each pattern. */
1022 min_invalid = num_opnds;
1023 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
1026 qualifiers = *qualifiers_list;
1028 /* Start as positive. */
1031 DEBUG_TRACE ("%d", i);
1032 #ifdef DEBUG_AARCH64
1034 dump_match_qualifiers (inst->operands, qualifiers);
1037 /* The first entry should be taken literally, even if it's an empty
1038 qualifier sequence. (This matters for strict testing.) In other
1039 positions an empty sequence acts as a terminator. */
1040 if (i > 0 && empty_qualifier_sequence_p (qualifiers))
1043 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
1045 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL
1046 && !(inst->opcode->flags & F_STRICT))
1048 /* Either the operand does not have qualifier, or the qualifier
1049 for the operand needs to be deduced from the qualifier
1051 In the latter case, any constraint checking related with
1052 the obtained qualifier should be done later in
1053 operand_general_constraint_met_p. */
1056 else if (*qualifiers != inst->operands[j].qualifier)
1058 /* Unless the target qualifier can also qualify the operand
1059 (which has already had a non-nil qualifier), non-equal
1060 qualifiers are generally un-matched. */
1061 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
1067 continue; /* Equal qualifiers are certainly matched. */
/* Track the best (smallest) mismatch count across all sequences.  */
1070 if (min_invalid > invalid)
1071 min_invalid = invalid;
1073 /* Qualifiers established. */
1074 if (min_invalid == 0)
1078 *invalid_count = min_invalid;
1079 if (min_invalid == 0)
1081 /* Fill the result in *RET. */
1083 qualifiers = *qualifiers_list;
1085 DEBUG_TRACE ("complete qualifiers using list %d", i);
1086 #ifdef DEBUG_AARCH64
1088 dump_qualifier_sequence (qualifiers);
/* Copy the matched qualifiers up to STOP_AT; pad the rest with NIL.  */
1091 for (j = 0; j <= stop_at; ++j, ++qualifiers)
1092 ret[j] = *qualifiers;
1093 for (; j < AARCH64_MAX_OPND_NUM; ++j)
1094 ret[j] = AARCH64_OPND_QLF_NIL;
1096 DEBUG_TRACE ("SUCCESS");
1100 DEBUG_TRACE ("FAIL");
1104 /* Operand qualifier matching and resolving.
1106 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1107 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1109 Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
1110 This is always 0 if the function succeeds.
1112 If UPDATE_P, update the qualifier(s) in *INST after the matching
/* Resolve the operand qualifiers of *INST against the opcode's allowed
   qualifier sequences, delegating to aarch64_find_best_match (with
   STOP_AT == -1, i.e. consider all operands).  On success, when UPDATE_P
   is set, write the matched qualifiers back into INST->operands.  */
1116 match_operands_qualifier (aarch64_inst *inst, bool update_p,
1120 aarch64_opnd_qualifier_seq_t qualifiers;
/* -1: do not stop early; match every operand position.  */
1122 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1123 qualifiers, invalid_count))
1125 DEBUG_TRACE ("matching FAIL");
1129 /* Update the qualifiers.  */
1131 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
/* AARCH64_OPND_NIL marks the end of the opcode's operand list;
   presumably the loop terminates here — elided lines not visible.  */
1133 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1135 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1136 "update %s with %s for operand %d",
1137 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1138 aarch64_get_qualifier_name (qualifiers[i]), i);
1139 inst->operands[i].qualifier = qualifiers[i];
1142 DEBUG_TRACE ("matching SUCCESS");
1146 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1149 IS32 indicates whether VALUE is a 32-bit immediate or not.
1150 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1151 amount will be returned in *SHIFT_AMOUNT. */
/* Test whether VALUE is a single 16-bit chunk (optionally shifted by 0,
   16, 32 or 48 bits) and can therefore be materialised with one MOVZ.
   IS32 restricts the test to the low 32 bits; the 48/32-bit shift cases
   are only tried for 64-bit values.  On success the shift amount is
   stored through SHIFT_AMOUNT when non-NULL.  */
1154 aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
1158 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1162 /* Allow all zeros or all ones in top 32-bits, so that
1163 32-bit constant expressions like ~0x80000000 are
1165 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1166 /* Immediate out of range.  */
/* Discard the (all-zero or all-one) top half for the 32-bit case.  */
1168 value &= 0xffffffff;
1171 /* First, try MOVZ-style chunks (then the caller may try MOVN).  */
1173 if ((value & ((uint64_t) 0xffff << 0)) == value)
1175 else if ((value & ((uint64_t) 0xffff << 16)) == value)
1177 else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
1179 else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
1184 DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1188 if (shift_amount != NULL)
1189 *shift_amount = amount;
1191 DEBUG_TRACE ("exit true with amount %d", amount);
1196 /* Build the accepted values for immediate logical SIMD instructions.
1198 The standard encodings of the immediate value are:
1199 N imms immr SIMD size R S
1200 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1201 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1202 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1203 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1204 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1205 0 11110s 00000r 2 UInt(r) UInt(s)
1206 where all-ones value of S is reserved.
1208 Let's call E the SIMD size.
1210 The immediate value is: S+1 bits '1' rotated to the right by R.
1212 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1213 (remember S != E - 1). */
/* Total number of distinct valid logical-immediate encodings:
   64*63 + 32*31 + 16*15 + 8*7 + 4*3 + 2*1 = 5334 (see comment above).  */
1215 #define TOTAL_IMM_NB 5334
/* Pair of (64-bit immediate value, its standard N:immr:imms encoding);
   struct opening and the imm field are elided in this view.  */
1220 aarch64_insn encoding;
1221 } simd_imm_encoding;
/* Table of all valid immediates, built lazily by build_immediate_table
   and kept sorted by imm so bsearch can be used for lookup.  */
1223 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
/* qsort/bsearch comparator: order simd_imm_encoding entries by their
   64-bit immediate value.  */
1226 simd_imm_encoding_cmp(const void *i1, const void *i2)
1228 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1229 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1231 if (imm1->imm < imm2->imm)
1233 if (imm1->imm > imm2->imm)
1238 /* immediate bitfield standard encoding
1239 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1240 1 ssssss rrrrrr 64 rrrrrr ssssss
1241 0 0sssss 0rrrrr 32 rrrrr sssss
1242 0 10ssss 00rrrr 16 rrrr ssss
1243 0 110sss 000rrr 8 rrr sss
1244 0 1110ss 0000rr 4 rr ss
1245 0 11110s 00000r 2 r s */
/* Pack (IS64, R, S) into the standard 13-bit N:immr:imms layout:
   bit 12 = N (is64), bits 11:6 = immr (r), bits 5:0 = imms (s).  */
1247 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
1249 return (is64 << 12) | (r << 6) | s;
/* Populate simd_immediates[] with every valid logical immediate and its
   standard encoding, then sort the table by immediate value so that
   aarch64_logical_immediate_p can binary-search it.  */
1253 build_immediate_table (void)
1255 uint32_t log_e, e, s, r, s_mask;
/* Element sizes e = 2, 4, 8, 16, 32, 64 (e == 1 << log_e).  */
1261 for (log_e = 1; log_e <= 6; log_e++)
1263 /* Get element size.  */
/* Full 64-bit mask for e == 64; narrower mask otherwise.  */
1268 mask = 0xffffffffffffffffull;
1274 mask = (1ull << e) - 1;
1276 1 ((1 << 4) - 1) << 2 = 111100
1277 2 ((1 << 3) - 1) << 3 = 111000
1278 3 ((1 << 2) - 1) << 4 = 110000
1279 4 ((1 << 1) - 1) << 5 = 100000
1280 5 ((1 << 0) - 1) << 6 = 000000 */
/* High bits of imms that identify the element size in the encoding.  */
1281 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
/* All-ones S (s == e - 1) is reserved, hence s < e - 1.  */
1283 for (s = 0; s < e - 1; s++)
1284 for (r = 0; r < e; r++)
1286 /* s+1 consecutive bits to 1 (s < 63) */
1287 imm = (1ull << (s + 1)) - 1;
1288 /* rotate right by r */
1290 imm = (imm >> r) | ((imm << (e - r)) & mask);
1291 /* replicate the constant depending on SIMD size */
/* Successively double the pattern up to 64 bits; fallthrough between
   cases is presumably intentional (break lines not visible here).  */
1294 case 1: imm = (imm << 2) | imm;
1296 case 2: imm = (imm << 4) | imm;
1298 case 3: imm = (imm << 8) | imm;
1300 case 4: imm = (imm << 16) | imm;
1302 case 5: imm = (imm << 32) | imm;
1307 simd_immediates[nb_imms].imm = imm;
1308 simd_immediates[nb_imms].encoding =
1309 encode_immediate_bitfield(is64, s | s_mask, r);
/* Sanity check: exactly TOTAL_IMM_NB entries were generated.  */
1313 assert (nb_imms == TOTAL_IMM_NB);
1314 qsort(simd_immediates, nb_imms,
1315 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1318 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1319 be accepted by logical (immediate) instructions
1320 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1322 ESIZE is the number of bytes in the decoded immediate value.
1323 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1324 VALUE will be returned in *ENCODING. */
/* Decide whether VALUE is a valid bitmask immediate for an ESIZE-byte
   element, by replicating VALUE to 64 bits and binary-searching the
   precomputed simd_immediates table.  The matching N:immr:imms encoding
   is stored through ENCODING when non-NULL.  */
1327 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1329 simd_imm_encoding imm_enc;
1330 const simd_imm_encoding *imm_encoding;
/* Build the lookup table exactly once, on first use.  */
1331 static bool initialized = false;
1335 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
1340 build_immediate_table ();
1344 /* Allow all zeros or all ones in top bits, so that
1345 constant expressions like ~1 are permitted.  */
/* Two shifts of esize*4 avoid UB from a single 64-bit shift when
   esize == 8 (shifting by the full type width is undefined).  */
1346 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1347 if ((value & ~upper) != value && (value | upper) != value)
1350 /* Replicate to a full 64-bit value.  */
1352 for (i = esize * 8; i < 64; i *= 2)
1353 value |= (value << i);
1355 imm_enc.imm = value;
1356 imm_encoding = (const simd_imm_encoding *)
1357 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1358 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1359 if (imm_encoding == NULL)
1361 DEBUG_TRACE ("exit with false");
1364 if (encoding != NULL)
1365 *encoding = imm_encoding->encoding;
1366 DEBUG_TRACE ("exit with true");
1370 /* If 64-bit immediate IMM is in the format of
1371 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1372 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1373 of value "abcdefgh". Otherwise return -1. */
/* Collapse a 64-bit immediate whose each byte is 0x00 or 0xff into the
   8-bit value "abcdefgh" (bit i set iff byte i is 0xff); -1 if any byte
   is neither 0x00 nor 0xff (per the comment above this function).  */
1375 aarch64_shrink_expanded_imm8 (uint64_t imm)
1381 for (i = 0; i < 8; i++)
/* Examine byte i (little-endian byte order within the value).  */
1383 byte = (imm >> (8 * i)) & 0xff;
/* Reject any byte that is not 0x00/0xff; the 0xff case is elided.  */
1386 else if (byte != 0x00)
1392 /* Utility inline functions for operand_general_constraint_met_p. */
/* Record an operand error of kind KIND for operand IDX in
   *MISMATCH_DETAIL; no-op when MISMATCH_DETAIL is NULL (the
   disassembler passes NULL to suppress error construction).  */
1395 set_error (aarch64_operand_error *mismatch_detail,
1396 enum aarch64_operand_error_kind kind, int idx,
1399 if (mismatch_detail == NULL)
1401 mismatch_detail->kind = kind;
1402 mismatch_detail->index = idx;
1403 mismatch_detail->error = error;
/* Convenience wrapper: record a syntax error for operand IDX.  */
1407 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1410 if (mismatch_detail == NULL)
1412 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
/* Record an invalid-register-number error for operand IDX: register
   prefix PREFIX (e.g. "z") with valid range [LOWER_BOUND, UPPER_BOUND]
   stored in the data[] payload for later message formatting.  */
1416 set_invalid_regno_error (aarch64_operand_error *mismatch_detail, int idx,
1417 const char *prefix, int lower_bound, int upper_bound)
1419 if (mismatch_detail == NULL)
1421 set_error (mismatch_detail, AARCH64_OPDE_INVALID_REGNO, idx, NULL);
1422 mismatch_detail->data[0].s = prefix;
1423 mismatch_detail->data[1].i = lower_bound;
1424 mismatch_detail->data[2].i = upper_bound;
/* Record an out-of-range error for operand IDX with the permitted
   range [LOWER_BOUND, UPPER_BOUND] stored in the data[] payload.  */
1428 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1429 int idx, int lower_bound, int upper_bound,
1432 if (mismatch_detail == NULL)
1434 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1435 mismatch_detail->data[0].i = lower_bound;
1436 mismatch_detail->data[1].i = upper_bound;
/* Out-of-range error specialised for immediate values.  */
1440 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1441 int idx, int lower_bound, int upper_bound)
1443 if (mismatch_detail == NULL)
1445 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1446 _("immediate value"));
/* Out-of-range error specialised for immediate address offsets.  */
1450 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1451 int idx, int lower_bound, int upper_bound)
1453 if (mismatch_detail == NULL)
1455 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1456 _("immediate offset"));
/* Out-of-range error specialised for register numbers.  */
1460 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1461 int idx, int lower_bound, int upper_bound)
1463 if (mismatch_detail == NULL)
1465 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1466 _("register number"));
/* Out-of-range error specialised for vector element indices.  */
1470 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1471 int idx, int lower_bound, int upper_bound)
1473 if (mismatch_detail == NULL)
1475 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1476 _("register element index"));
/* Out-of-range error specialised for shift amounts (the description
   string argument is on an elided line).  */
1480 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1481 int idx, int lower_bound, int upper_bound)
1483 if (mismatch_detail == NULL)
1485 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1489 /* Report that the MUL modifier in operand IDX should be in the range
1490 [LOWER_BOUND, UPPER_BOUND]. */
/* Out-of-range error specialised for the SVE MUL modifier (the
   description string argument is on an elided line).  */
1492 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1493 int idx, int lower_bound, int upper_bound)
1495 if (mismatch_detail == NULL)
1497 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
/* Record a misalignment error for operand IDX; the required ALIGNMENT
   is stored in the data[] payload.  */
1502 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1505 if (mismatch_detail == NULL)
1507 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1508 mismatch_detail->data[0].i = alignment;
/* Record a wrong-register-list-length error for operand IDX.  The
   payload is a bitmask of acceptable lengths (1 << EXPECTED_NUM).  */
1512 set_reg_list_length_error (aarch64_operand_error *mismatch_detail, int idx,
1515 if (mismatch_detail == NULL)
1517 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_LENGTH, idx, NULL);
1518 mismatch_detail->data[0].i = 1 << expected_num;
/* Record a wrong-register-list-stride error for operand IDX.  The
   payload is a bitmask of acceptable strides (1 << EXPECTED_NUM).  */
1522 set_reg_list_stride_error (aarch64_operand_error *mismatch_detail, int idx,
1525 if (mismatch_detail == NULL)
1527 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_STRIDE, idx, NULL);
1528 mismatch_detail->data[0].i = 1 << expected_num;
/* Record an invalid vector-group-size error for operand IDX; EXPECTED
   is the required group size.  */
1532 set_invalid_vg_size (aarch64_operand_error *mismatch_detail,
1533 int idx, int expected)
1535 if (mismatch_detail == NULL)
1537 set_error (mismatch_detail, AARCH64_OPDE_INVALID_VG_SIZE, idx, NULL);
1538 mismatch_detail->data[0].i = expected;
/* Record a catch-all error for operand IDX with a static message.  */
1542 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1545 if (mismatch_detail == NULL)
1547 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1550 /* Check that indexed register operand OPND has a register in the range
1551 [MIN_REGNO, MAX_REGNO] and an index in the range [MIN_INDEX, MAX_INDEX].
1552 PREFIX is the register prefix, such as "z" for SVE vector registers. */
/* Validate an indexed register operand OPND: register number within
   [MIN_REGNO, MAX_REGNO] and lane index within [MIN_INDEX, MAX_INDEX].
   On failure, report via MISMATCH_DETAIL for operand IDX using PREFIX
   as the register-name prefix (e.g. "z" for SVE vectors).  */
1555 check_reglane (const aarch64_opnd_info *opnd,
1556 aarch64_operand_error *mismatch_detail, int idx,
1557 const char *prefix, int min_regno, int max_regno,
1558 int min_index, int max_index)
1560 if (!value_in_range_p (opnd->reglane.regno, min_regno, max_regno))
1562 set_invalid_regno_error (mismatch_detail, idx, prefix, min_regno,
1566 if (!value_in_range_p (opnd->reglane.index, min_index, max_index))
1568 set_elem_idx_out_of_range_error (mismatch_detail, idx, min_index,
1575 /* Check that register list operand OPND has NUM_REGS registers and a
1576 register stride of STRIDE. */
/* Validate register list operand OPND: exactly NUM_REGS registers with
   a register-number stride of STRIDE; report failures for operand IDX
   via MISMATCH_DETAIL.  */
1579 check_reglist (const aarch64_opnd_info *opnd,
1580 aarch64_operand_error *mismatch_detail, int idx,
1581 int num_regs, int stride)
1583 if (opnd->reglist.num_regs != num_regs)
1585 set_reg_list_length_error (mismatch_detail, idx, num_regs);
1588 if (opnd->reglist.stride != stride)
1590 set_reg_list_stride_error (mismatch_detail, idx, stride);
1596 /* Check that indexed ZA operand OPND has:
1598 - a selection register in the range [MIN_WREG, MIN_WREG + 3]
1600 - RANGE_SIZE consecutive immediate offsets.
1602 - an initial immediate offset that is a multiple of RANGE_SIZE
1603 in the range [0, MAX_VALUE * RANGE_SIZE]
1605 - a vector group size of GROUP_SIZE. */
/* Validate an indexed ZA access operand OPND (see the comment block
   above): selection register in [MIN_WREG, MIN_WREG + 3], an initial
   immediate offset that is a multiple of RANGE_SIZE within
   [0, MAX_VALUE * RANGE_SIZE], RANGE_SIZE consecutive offsets, and a
   vector group size of GROUP_SIZE (0 = unspecified, accepted).  */
1608 check_za_access (const aarch64_opnd_info *opnd,
1609 aarch64_operand_error *mismatch_detail, int idx,
1610 int min_wreg, int max_value, unsigned int range_size,
1613 if (!value_in_range_p (opnd->indexed_za.index.regno, min_wreg, min_wreg + 3))
/* Message varies by the base register: presumably min_wreg == 12 gives
   "w12-w15" and min_wreg == 8 gives "w8-w11"; range text elided.  */
1616 set_other_error (mismatch_detail, idx,
1617 _("expected a selection register in the"
1619 else if (min_wreg == 8)
1620 set_other_error (mismatch_detail, idx,
1621 _("expected a selection register in the"
1628 int max_index = max_value * range_size;
1629 if (!value_in_range_p (opnd->indexed_za.index.imm, 0, max_index))
1631 set_offset_out_of_range_error (mismatch_detail, idx, 0, max_index);
/* The first offset of a range must itself be range-aligned.  */
1635 if ((opnd->indexed_za.index.imm % range_size) != 0)
1637 assert (range_size == 2 || range_size == 4);
1638 set_other_error (mismatch_detail, idx,
1640 ? _("starting offset is not a multiple of 2")
1641 : _("starting offset is not a multiple of 4"));
/* countm1 is the offset count minus one; it must match RANGE_SIZE.  */
1645 if (opnd->indexed_za.index.countm1 != range_size - 1)
1647 if (range_size == 1)
1648 set_other_error (mismatch_detail, idx,
1649 _("expected a single offset rather than"
1651 else if (range_size == 2)
1652 set_other_error (mismatch_detail, idx,
1653 _("expected a range of two offsets"));
1654 else if (range_size == 4)
1655 set_other_error (mismatch_detail, idx,
1656 _("expected a range of four offsets"));
1662 /* The vector group specifier is optional in assembly code.  */
1663 if (opnd->indexed_za.group_size != 0
1664 && opnd->indexed_za.group_size != group_size)
1666 set_invalid_vg_size (mismatch_detail, idx, group_size);
1673 /* Given a load/store operation, calculate the size of transferred data via a
1674 cumulative sum of qualifier sizes preceding the address operand in the
1675 OPNDS operand list argument. */
/* Sum the qualifier element sizes of the operands that precede the
   address operand in OPNDS, giving the number of bytes transferred by
   a load/store (see the comment block above).  The loop presumably
   stops at the address operand; the terminating lines are elided.  */
1677 calc_ldst_datasize (const aarch64_opnd_info *opnds)
1679 unsigned num_bytes = 0; /* total number of bytes transferred.  */
1680 enum aarch64_operand_class opnd_class;
1681 enum aarch64_opnd type;
1683 for (int i = 0; i < AARCH64_MAX_OPND_NUM; i++)
1685 type = opnds[i].type;
1686 opnd_class = aarch64_operands[type].op_class;
/* Reaching the address operand ends the accumulation.  */
1687 if (opnd_class == AARCH64_OPND_CLASS_ADDRESS)
1689 num_bytes += aarch64_get_qualifier_esize (opnds[i].qualifier);
1695 /* General constraint checking based on operand code.
1697 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1698 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1700 This function has to be called after the qualifiers for all operands
1703 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1704 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1705 of error message during the disassembling where error message is not
1706 wanted. We avoid the dynamic construction of strings of error messages
1707 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1708 use a combination of error code, static string and some integer data to
1709 represent an error. */
1712 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1713 enum aarch64_opnd type,
1714 const aarch64_opcode *opcode,
1715 aarch64_operand_error *mismatch_detail)
1717 unsigned num, modifiers, shift;
1719 int64_t imm, min_value, max_value;
1720 uint64_t uvalue, mask;
1721 const aarch64_opnd_info *opnd = opnds + idx;
1722 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1725 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1727 switch (aarch64_operands[type].op_class)
1729 case AARCH64_OPND_CLASS_INT_REG:
1730 /* Check for pair of xzr registers. */
1731 if (type == AARCH64_OPND_PAIRREG_OR_XZR
1732 && opnds[idx - 1].reg.regno == 0x1f)
1734 if (opnds[idx].reg.regno != 0x1f)
1736 set_syntax_error (mismatch_detail, idx - 1,
1737 _("second reg in pair should be xzr if first is"
1742 /* Check pair reg constraints for instructions taking a pair of
1743 consecutively-numbered general-purpose registers. */
1744 else if (type == AARCH64_OPND_PAIRREG
1745 || type == AARCH64_OPND_PAIRREG_OR_XZR)
1747 assert (idx == 1 || idx == 2 || idx == 3 || idx == 5);
1748 if (opnds[idx - 1].reg.regno % 2 != 0)
1750 set_syntax_error (mismatch_detail, idx - 1,
1751 _("reg pair must start from even reg"));
1754 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1756 set_syntax_error (mismatch_detail, idx,
1757 _("reg pair must be contiguous"));
1763 /* <Xt> may be optional in some IC and TLBI instructions. */
1764 if (type == AARCH64_OPND_Rt_SYS)
1766 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1767 == AARCH64_OPND_CLASS_SYSTEM));
1768 if (opnds[1].present
1769 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1771 set_other_error (mismatch_detail, idx, _("extraneous register"));
1774 if (!opnds[1].present
1775 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1777 set_other_error (mismatch_detail, idx, _("missing register"));
1783 case AARCH64_OPND_QLF_WSP:
1784 case AARCH64_OPND_QLF_SP:
1785 if (!aarch64_stack_pointer_p (opnd))
1787 set_other_error (mismatch_detail, idx,
1788 _("stack pointer register expected"));
1797 case AARCH64_OPND_CLASS_SVE_REG:
1800 case AARCH64_OPND_SVE_Zm3_INDEX:
1801 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1802 case AARCH64_OPND_SVE_Zm3_19_INDEX:
1803 case AARCH64_OPND_SVE_Zm3_11_INDEX:
1804 case AARCH64_OPND_SVE_Zm4_11_INDEX:
1805 case AARCH64_OPND_SVE_Zm4_INDEX:
1806 size = get_operand_fields_width (get_operand_from_code (type));
1807 shift = get_operand_specific_data (&aarch64_operands[type]);
1808 if (!check_reglane (opnd, mismatch_detail, idx,
1809 "z", 0, (1 << shift) - 1,
1810 0, (1u << (size - shift)) - 1))
1814 case AARCH64_OPND_SVE_Zn_INDEX:
1815 size = aarch64_get_qualifier_esize (opnd->qualifier);
1816 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
1821 case AARCH64_OPND_SVE_Zm_imm4:
1822 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31, 0, 15))
1826 case AARCH64_OPND_SVE_Zn_5_INDEX:
1827 size = aarch64_get_qualifier_esize (opnd->qualifier);
1828 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
1833 case AARCH64_OPND_SME_PNn3_INDEX1:
1834 case AARCH64_OPND_SME_PNn3_INDEX2:
1835 size = get_operand_field_width (get_operand_from_code (type), 1);
1836 if (!check_reglane (opnd, mismatch_detail, idx, "pn", 8, 15,
1837 0, (1 << size) - 1))
1841 case AARCH64_OPND_SME_Zn_INDEX1_16:
1842 case AARCH64_OPND_SME_Zn_INDEX2_15:
1843 case AARCH64_OPND_SME_Zn_INDEX2_16:
1844 case AARCH64_OPND_SME_Zn_INDEX3_14:
1845 case AARCH64_OPND_SME_Zn_INDEX3_15:
1846 case AARCH64_OPND_SME_Zn_INDEX4_14:
1847 size = get_operand_fields_width (get_operand_from_code (type)) - 5;
1848 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
1849 0, (1 << size) - 1))
1853 case AARCH64_OPND_SME_Zm_INDEX1:
1854 case AARCH64_OPND_SME_Zm_INDEX2:
1855 case AARCH64_OPND_SME_Zm_INDEX3_1:
1856 case AARCH64_OPND_SME_Zm_INDEX3_2:
1857 case AARCH64_OPND_SME_Zm_INDEX3_10:
1858 case AARCH64_OPND_SME_Zm_INDEX4_1:
1859 case AARCH64_OPND_SME_Zm_INDEX4_10:
1860 size = get_operand_fields_width (get_operand_from_code (type)) - 4;
1861 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 15,
1862 0, (1 << size) - 1))
1866 case AARCH64_OPND_SME_Zm:
1867 if (opnd->reg.regno > 15)
1869 set_invalid_regno_error (mismatch_detail, idx, "z", 0, 15);
1874 case AARCH64_OPND_SME_PnT_Wm_imm:
1875 size = aarch64_get_qualifier_esize (opnd->qualifier);
1876 max_value = 16 / size - 1;
1877 if (!check_za_access (opnd, mismatch_detail, idx,
1878 12, max_value, 1, 0))
1887 case AARCH64_OPND_CLASS_SVE_REGLIST:
1890 case AARCH64_OPND_SME_Pdx2:
1891 case AARCH64_OPND_SME_Zdnx2:
1892 case AARCH64_OPND_SME_Zdnx4:
1893 case AARCH64_OPND_SME_Zmx2:
1894 case AARCH64_OPND_SME_Zmx4:
1895 case AARCH64_OPND_SME_Znx2:
1896 case AARCH64_OPND_SME_Znx4:
1897 case AARCH64_OPND_SME_Zt2:
1898 case AARCH64_OPND_SME_Zt3:
1899 case AARCH64_OPND_SME_Zt4:
1900 num = get_operand_specific_data (&aarch64_operands[type]);
1901 if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
1903 if ((opnd->reglist.first_regno % num) != 0)
1905 set_other_error (mismatch_detail, idx,
1906 _("start register out of range"));
1911 case AARCH64_OPND_SME_Ztx2_STRIDED:
1912 case AARCH64_OPND_SME_Ztx4_STRIDED:
1913 /* 2-register lists have a stride of 8 and 4-register lists
1914 have a stride of 4. */
1915 num = get_operand_specific_data (&aarch64_operands[type]);
1916 if (!check_reglist (opnd, mismatch_detail, idx, num, 16 / num))
1918 num = 16 | (opnd->reglist.stride - 1);
1919 if ((opnd->reglist.first_regno & ~num) != 0)
1921 set_other_error (mismatch_detail, idx,
1922 _("start register out of range"));
1927 case AARCH64_OPND_SME_PdxN:
1928 case AARCH64_OPND_SVE_ZnxN:
1929 case AARCH64_OPND_SVE_ZtxN:
1930 num = get_opcode_dependent_value (opcode);
1931 if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
1940 case AARCH64_OPND_CLASS_ZA_ACCESS:
1943 case AARCH64_OPND_SME_ZA_HV_idx_src:
1944 case AARCH64_OPND_SME_ZA_HV_idx_dest:
1945 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
1946 size = aarch64_get_qualifier_esize (opnd->qualifier);
1947 max_value = 16 / size - 1;
1948 if (!check_za_access (opnd, mismatch_detail, idx, 12, max_value, 1,
1949 get_opcode_dependent_value (opcode)))
1953 case AARCH64_OPND_SME_ZA_array_off4:
1954 if (!check_za_access (opnd, mismatch_detail, idx, 12, 15, 1,
1955 get_opcode_dependent_value (opcode)))
1959 case AARCH64_OPND_SME_ZA_array_off3_0:
1960 case AARCH64_OPND_SME_ZA_array_off3_5:
1961 if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 1,
1962 get_opcode_dependent_value (opcode)))
1966 case AARCH64_OPND_SME_ZA_array_off1x4:
1967 if (!check_za_access (opnd, mismatch_detail, idx, 8, 1, 4,
1968 get_opcode_dependent_value (opcode)))
1972 case AARCH64_OPND_SME_ZA_array_off2x2:
1973 if (!check_za_access (opnd, mismatch_detail, idx, 8, 3, 2,
1974 get_opcode_dependent_value (opcode)))
1978 case AARCH64_OPND_SME_ZA_array_off2x4:
1979 if (!check_za_access (opnd, mismatch_detail, idx, 8, 3, 4,
1980 get_opcode_dependent_value (opcode)))
1984 case AARCH64_OPND_SME_ZA_array_off3x2:
1985 if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 2,
1986 get_opcode_dependent_value (opcode)))
1990 case AARCH64_OPND_SME_ZA_array_vrsb_1:
1991 if (!check_za_access (opnd, mismatch_detail, idx, 12, 7, 2,
1992 get_opcode_dependent_value (opcode)))
1996 case AARCH64_OPND_SME_ZA_array_vrsh_1:
1997 if (!check_za_access (opnd, mismatch_detail, idx, 12, 3, 2,
1998 get_opcode_dependent_value (opcode)))
2002 case AARCH64_OPND_SME_ZA_array_vrss_1:
2003 if (!check_za_access (opnd, mismatch_detail, idx, 12, 1, 2,
2004 get_opcode_dependent_value (opcode)))
2008 case AARCH64_OPND_SME_ZA_array_vrsd_1:
2009 if (!check_za_access (opnd, mismatch_detail, idx, 12, 0, 2,
2010 get_opcode_dependent_value (opcode)))
2014 case AARCH64_OPND_SME_ZA_array_vrsb_2:
2015 if (!check_za_access (opnd, mismatch_detail, idx, 12, 3, 4,
2016 get_opcode_dependent_value (opcode)))
2020 case AARCH64_OPND_SME_ZA_array_vrsh_2:
2021 if (!check_za_access (opnd, mismatch_detail, idx, 12, 1, 4,
2022 get_opcode_dependent_value (opcode)))
2026 case AARCH64_OPND_SME_ZA_array_vrss_2:
2027 case AARCH64_OPND_SME_ZA_array_vrsd_2:
2028 if (!check_za_access (opnd, mismatch_detail, idx, 12, 0, 4,
2029 get_opcode_dependent_value (opcode)))
2033 case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
2034 case AARCH64_OPND_SME_ZA_HV_idx_destxN:
2035 size = aarch64_get_qualifier_esize (opnd->qualifier);
2036 num = get_opcode_dependent_value (opcode);
2037 max_value = 16 / num / size;
2040 if (!check_za_access (opnd, mismatch_detail, idx,
2041 12, max_value, num, 0))
2050 case AARCH64_OPND_CLASS_PRED_REG:
2053 case AARCH64_OPND_SME_PNd3:
2054 case AARCH64_OPND_SME_PNg3:
2055 if (opnd->reg.regno < 8)
2057 set_invalid_regno_error (mismatch_detail, idx, "pn", 8, 15);
2063 if (opnd->reg.regno >= 8
2064 && get_operand_fields_width (get_operand_from_code (type)) == 3)
2066 set_invalid_regno_error (mismatch_detail, idx, "p", 0, 7);
2073 case AARCH64_OPND_CLASS_COND:
2074 if (type == AARCH64_OPND_COND1
2075 && (opnds[idx].cond->value & 0xe) == 0xe)
2077 /* Not allow AL or NV. */
2078 set_syntax_error (mismatch_detail, idx, NULL);
2082 case AARCH64_OPND_CLASS_ADDRESS:
2083 /* Check writeback. */
2084 switch (opcode->iclass)
2088 case ldstnapair_offs:
2091 if (opnd->addr.writeback == 1)
2093 set_syntax_error (mismatch_detail, idx,
2094 _("unexpected address writeback"));
2099 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
2101 set_syntax_error (mismatch_detail, idx,
2102 _("unexpected address writeback"));
2107 case ldstpair_indexed:
2110 if (opnd->addr.writeback == 0)
2112 set_syntax_error (mismatch_detail, idx,
2113 _("address writeback expected"));
2118 if (opnd->addr.writeback)
2119 if ((type == AARCH64_OPND_RCPC3_ADDR_PREIND_WB
2120 && !opnd->addr.preind)
2121 || (type == AARCH64_OPND_RCPC3_ADDR_POSTIND
2122 && !opnd->addr.postind))
2124 set_syntax_error (mismatch_detail, idx,
2125 _("unexpected address writeback"));
2131 assert (opnd->addr.writeback == 0);
2136 case AARCH64_OPND_ADDR_SIMM7:
2137 /* Scaled signed 7 bits immediate offset. */
2138 /* Get the size of the data element that is accessed, which may be
2139 different from that of the source register size,
2140 e.g. in strb/ldrb. */
2141 size = aarch64_get_qualifier_esize (opnd->qualifier);
2142 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
2144 set_offset_out_of_range_error (mismatch_detail, idx,
2145 -64 * size, 63 * size);
2148 if (!value_aligned_p (opnd->addr.offset.imm, size))
2150 set_unaligned_error (mismatch_detail, idx, size);
2154 case AARCH64_OPND_ADDR_OFFSET:
2155 case AARCH64_OPND_ADDR_SIMM9:
2156 /* Unscaled signed 9 bits immediate offset. */
2157 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
2159 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
2164 case AARCH64_OPND_ADDR_SIMM9_2:
2165 /* Unscaled signed 9 bits immediate offset, which has to be negative
2167 size = aarch64_get_qualifier_esize (qualifier);
2168 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
2169 && !value_aligned_p (opnd->addr.offset.imm, size))
2170 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
2172 set_other_error (mismatch_detail, idx,
2173 _("negative or unaligned offset expected"));
2176 case AARCH64_OPND_ADDR_SIMM10:
2177 /* Scaled signed 10 bits immediate offset. */
2178 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
2180 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
2183 if (!value_aligned_p (opnd->addr.offset.imm, 8))
2185 set_unaligned_error (mismatch_detail, idx, 8);
2190 case AARCH64_OPND_ADDR_SIMM11:
2191 /* Signed 11 bits immediate offset (multiple of 16). */
2192 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
2194 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
2198 if (!value_aligned_p (opnd->addr.offset.imm, 16))
2200 set_unaligned_error (mismatch_detail, idx, 16);
2205 case AARCH64_OPND_ADDR_SIMM13:
2206 /* Signed 13 bits immediate offset (multiple of 16). */
2207 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
2209 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
2213 if (!value_aligned_p (opnd->addr.offset.imm, 16))
2215 set_unaligned_error (mismatch_detail, idx, 16);
2220 case AARCH64_OPND_SIMD_ADDR_POST:
2221 /* AdvSIMD load/store multiple structures, post-index. */
2223 if (opnd->addr.offset.is_reg)
2225 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
2229 set_other_error (mismatch_detail, idx,
2230 _("invalid register offset"));
2236 const aarch64_opnd_info *prev = &opnds[idx-1];
2237 unsigned num_bytes; /* total number of bytes transferred. */
2238 /* The opcode dependent area stores the number of elements in
2239 each structure to be loaded/stored. */
2240 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
2241 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
2242 /* Special handling of loading single structure to all lane. */
2243 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
2244 * aarch64_get_qualifier_esize (prev->qualifier);
2246 num_bytes = prev->reglist.num_regs
2247 * aarch64_get_qualifier_esize (prev->qualifier)
2248 * aarch64_get_qualifier_nelem (prev->qualifier);
2249 if ((int) num_bytes != opnd->addr.offset.imm)
2251 set_other_error (mismatch_detail, idx,
2252 _("invalid post-increment amount"));
2258 case AARCH64_OPND_ADDR_REGOFF:
2259 /* Get the size of the data element that is accessed, which may be
2260 different from that of the source register size,
2261 e.g. in strb/ldrb. */
2262 size = aarch64_get_qualifier_esize (opnd->qualifier);
2263 /* It is either no shift or shift by the binary logarithm of SIZE. */
2264 if (opnd->shifter.amount != 0
2265 && opnd->shifter.amount != (int)get_logsz (size))
2267 set_other_error (mismatch_detail, idx,
2268 _("invalid shift amount"));
2271 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
2273 switch (opnd->shifter.kind)
2275 case AARCH64_MOD_UXTW:
2276 case AARCH64_MOD_LSL:
2277 case AARCH64_MOD_SXTW:
2278 case AARCH64_MOD_SXTX: break;
2280 set_other_error (mismatch_detail, idx,
2281 _("invalid extend/shift operator"));
2286 case AARCH64_OPND_ADDR_UIMM12:
2287 imm = opnd->addr.offset.imm;
2288 /* Get the size of the data element that is accessed, which may be
2289 different from that of the source register size,
2290 e.g. in strb/ldrb. */
2291 size = aarch64_get_qualifier_esize (qualifier);
2292 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
2294 set_offset_out_of_range_error (mismatch_detail, idx,
2298 if (!value_aligned_p (opnd->addr.offset.imm, size))
2300 set_unaligned_error (mismatch_detail, idx, size);
2305 case AARCH64_OPND_ADDR_PCREL14:
2306 case AARCH64_OPND_ADDR_PCREL19:
2307 case AARCH64_OPND_ADDR_PCREL21:
2308 case AARCH64_OPND_ADDR_PCREL26:
2309 imm = opnd->imm.value;
2310 if (operand_need_shift_by_two (get_operand_from_code (type)))
2312 /* The offset value in a PC-relative branch instruction is always
2313 4-byte aligned and is encoded without the lowest 2 bits. */
2314 if (!value_aligned_p (imm, 4))
2316 set_unaligned_error (mismatch_detail, idx, 4);
2319 /* Right shift by 2 so that we can carry out the following check
2323 size = get_operand_fields_width (get_operand_from_code (type));
2324 if (!value_fit_signed_field_p (imm, size))
2326 set_other_error (mismatch_detail, idx,
2327 _("immediate out of range"));
2332 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
2333 if (!value_in_range_p (opnd->addr.offset.imm, 0, 15))
2335 set_offset_out_of_range_error (mismatch_detail, idx, 0, 15);
2340 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
2341 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
2342 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
2343 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
2347 assert (!opnd->addr.offset.is_reg);
2348 assert (opnd->addr.preind);
2349 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
2352 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
2353 || (opnd->shifter.operator_present
2354 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
2356 set_other_error (mismatch_detail, idx,
2357 _("invalid addressing mode"));
2360 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
2362 set_offset_out_of_range_error (mismatch_detail, idx,
2363 min_value, max_value);
2366 if (!value_aligned_p (opnd->addr.offset.imm, num))
2368 set_unaligned_error (mismatch_detail, idx, num);
2373 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
2376 goto sve_imm_offset_vl;
2378 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
2381 goto sve_imm_offset_vl;
2383 case AARCH64_OPND_SVE_ADDR_RI_U6:
2384 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
2385 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
2386 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
2390 assert (!opnd->addr.offset.is_reg);
2391 assert (opnd->addr.preind);
2392 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
2395 if (opnd->shifter.operator_present
2396 || opnd->shifter.amount_present)
2398 set_other_error (mismatch_detail, idx,
2399 _("invalid addressing mode"));
2402 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
2404 set_offset_out_of_range_error (mismatch_detail, idx,
2405 min_value, max_value);
2408 if (!value_aligned_p (opnd->addr.offset.imm, num))
2410 set_unaligned_error (mismatch_detail, idx, num);
2415 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
2416 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
2419 goto sve_imm_offset;
2421 case AARCH64_OPND_SVE_ADDR_ZX:
2422 /* Everything is already ensured by parse_operands or
2423 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
2425 assert (opnd->addr.offset.is_reg);
2426 assert (opnd->addr.preind);
2427 assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
2428 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2429 assert (opnd->shifter.operator_present == 0);
2432 case AARCH64_OPND_SVE_ADDR_R:
2433 case AARCH64_OPND_SVE_ADDR_RR:
2434 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
2435 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
2436 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
2437 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
2438 case AARCH64_OPND_SVE_ADDR_RX:
2439 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
2440 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
2441 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
2442 case AARCH64_OPND_SVE_ADDR_RZ:
2443 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
2444 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
2445 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
2446 modifiers = 1 << AARCH64_MOD_LSL;
2448 assert (opnd->addr.offset.is_reg);
2449 assert (opnd->addr.preind);
2450 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
2451 && opnd->addr.offset.regno == 31)
2453 set_other_error (mismatch_detail, idx,
2454 _("index register xzr is not allowed"));
2457 if (((1 << opnd->shifter.kind) & modifiers) == 0
2458 || (opnd->shifter.amount
2459 != get_operand_specific_data (&aarch64_operands[type])))
2461 set_other_error (mismatch_detail, idx,
2462 _("invalid addressing mode"));
2467 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
2468 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
2469 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
2470 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
2471 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
2472 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
2473 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
2474 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
2475 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
2476 goto sve_rr_operand;
2478 case AARCH64_OPND_SVE_ADDR_ZI_U5:
2479 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
2480 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
2481 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
2484 goto sve_imm_offset;
2486 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
2487 modifiers = 1 << AARCH64_MOD_LSL;
2489 assert (opnd->addr.offset.is_reg);
2490 assert (opnd->addr.preind);
2491 if (((1 << opnd->shifter.kind) & modifiers) == 0
2492 || opnd->shifter.amount < 0
2493 || opnd->shifter.amount > 3)
2495 set_other_error (mismatch_detail, idx,
2496 _("invalid addressing mode"));
2501 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
2502 modifiers = (1 << AARCH64_MOD_SXTW);
2503 goto sve_zz_operand;
2505 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
2506 modifiers = 1 << AARCH64_MOD_UXTW;
2507 goto sve_zz_operand;
2509 case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB:
2510 case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND:
2511 case AARCH64_OPND_RCPC3_ADDR_PREIND_WB:
2512 case AARCH64_OPND_RCPC3_ADDR_POSTIND:
2514 int num_bytes = calc_ldst_datasize (opnds);
2515 int abs_offset = (type == AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB
2516 || type == AARCH64_OPND_RCPC3_ADDR_PREIND_WB)
2517 ? opnd->addr.offset.imm * -1
2518 : opnd->addr.offset.imm;
2519 if ((int) num_bytes != abs_offset
2520 && opnd->addr.offset.imm != 0)
2522 set_other_error (mismatch_detail, idx,
2523 _("invalid increment amount"));
2529 case AARCH64_OPND_RCPC3_ADDR_OFFSET:
2530 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
2532 set_imm_out_of_range_error (mismatch_detail, idx, -256, 255);
2541 case AARCH64_OPND_CLASS_SIMD_REGLIST:
2542 if (type == AARCH64_OPND_LEt)
2544 /* Get the upper bound for the element index. */
2545 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2546 if (!value_in_range_p (opnd->reglist.index, 0, num))
2548 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2552 /* The opcode dependent area stores the number of elements in
2553 each structure to be loaded/stored. */
2554 num = get_opcode_dependent_value (opcode);
2557 case AARCH64_OPND_LVt:
2558 assert (num >= 1 && num <= 4);
2559 /* Unless LD1/ST1, the number of registers should be equal to that
2560 of the structure elements. */
2561 if (num != 1 && !check_reglist (opnd, mismatch_detail, idx, num, 1))
2564 case AARCH64_OPND_LVt_AL:
2565 case AARCH64_OPND_LEt:
2566 assert (num >= 1 && num <= 4);
2567 /* The number of registers should be equal to that of the structure
2569 if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
2575 if (opnd->reglist.stride != 1)
2577 set_reg_list_stride_error (mismatch_detail, idx, 1);
2582 case AARCH64_OPND_CLASS_IMMEDIATE:
2583 /* Constraint check on immediate operand. */
2584 imm = opnd->imm.value;
2585 /* E.g. imm_0_31 constrains value to be 0..31. */
2586 if (qualifier_value_in_range_constraint_p (qualifier)
2587 && !value_in_range_p (imm, get_lower_bound (qualifier),
2588 get_upper_bound (qualifier)))
2590 set_imm_out_of_range_error (mismatch_detail, idx,
2591 get_lower_bound (qualifier),
2592 get_upper_bound (qualifier));
2598 case AARCH64_OPND_AIMM:
2599 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2601 set_other_error (mismatch_detail, idx,
2602 _("invalid shift operator"));
2605 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2607 set_other_error (mismatch_detail, idx,
2608 _("shift amount must be 0 or 12"));
2611 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2613 set_other_error (mismatch_detail, idx,
2614 _("immediate out of range"));
2619 case AARCH64_OPND_HALF:
2620 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2621 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2623 set_other_error (mismatch_detail, idx,
2624 _("invalid shift operator"));
2627 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2628 if (!value_aligned_p (opnd->shifter.amount, 16))
2630 set_other_error (mismatch_detail, idx,
2631 _("shift amount must be a multiple of 16"));
2634 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2636 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2640 if (opnd->imm.value < 0)
2642 set_other_error (mismatch_detail, idx,
2643 _("negative immediate value not allowed"));
2646 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2648 set_other_error (mismatch_detail, idx,
2649 _("immediate out of range"));
2654 case AARCH64_OPND_IMM_MOV:
2656 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2657 imm = opnd->imm.value;
2661 case OP_MOV_IMM_WIDEN:
2664 case OP_MOV_IMM_WIDE:
2665 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2667 set_other_error (mismatch_detail, idx,
2668 _("immediate out of range"));
2672 case OP_MOV_IMM_LOG:
2673 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2675 set_other_error (mismatch_detail, idx,
2676 _("immediate out of range"));
2687 case AARCH64_OPND_NZCV:
2688 case AARCH64_OPND_CCMP_IMM:
2689 case AARCH64_OPND_EXCEPTION:
2690 case AARCH64_OPND_UNDEFINED:
2691 case AARCH64_OPND_TME_UIMM16:
2692 case AARCH64_OPND_UIMM4:
2693 case AARCH64_OPND_UIMM4_ADDG:
2694 case AARCH64_OPND_UIMM7:
2695 case AARCH64_OPND_UIMM3_OP1:
2696 case AARCH64_OPND_UIMM3_OP2:
2697 case AARCH64_OPND_SVE_UIMM3:
2698 case AARCH64_OPND_SVE_UIMM7:
2699 case AARCH64_OPND_SVE_UIMM8:
2700 case AARCH64_OPND_SVE_UIMM8_53:
2701 case AARCH64_OPND_CSSC_UIMM8:
2702 size = get_operand_fields_width (get_operand_from_code (type));
2704 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2706 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2712 case AARCH64_OPND_UIMM10:
2713 /* Scaled unsigned 10 bits immediate offset. */
2714 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2716 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2720 if (!value_aligned_p (opnd->imm.value, 16))
2722 set_unaligned_error (mismatch_detail, idx, 16);
2727 case AARCH64_OPND_SIMM5:
2728 case AARCH64_OPND_SVE_SIMM5:
2729 case AARCH64_OPND_SVE_SIMM5B:
2730 case AARCH64_OPND_SVE_SIMM6:
2731 case AARCH64_OPND_SVE_SIMM8:
2732 case AARCH64_OPND_CSSC_SIMM8:
2733 size = get_operand_fields_width (get_operand_from_code (type));
2735 if (!value_fit_signed_field_p (opnd->imm.value, size))
2737 set_imm_out_of_range_error (mismatch_detail, idx,
2739 (1 << (size - 1)) - 1);
2744 case AARCH64_OPND_WIDTH:
2745 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2746 && opnds[0].type == AARCH64_OPND_Rd);
2747 size = get_upper_bound (qualifier);
2748 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2749 /* lsb+width <= reg.size */
2751 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2752 size - opnds[idx-1].imm.value);
2757 case AARCH64_OPND_LIMM:
2758 case AARCH64_OPND_SVE_LIMM:
2760 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2761 uint64_t uimm = opnd->imm.value;
2762 if (opcode->op == OP_BIC)
2764 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2766 set_other_error (mismatch_detail, idx,
2767 _("immediate out of range"));
2773 case AARCH64_OPND_IMM0:
2774 case AARCH64_OPND_FPIMM0:
2775 if (opnd->imm.value != 0)
2777 set_other_error (mismatch_detail, idx,
2778 _("immediate zero expected"));
2783 case AARCH64_OPND_IMM_ROT1:
2784 case AARCH64_OPND_IMM_ROT2:
2785 case AARCH64_OPND_SVE_IMM_ROT2:
2786 if (opnd->imm.value != 0
2787 && opnd->imm.value != 90
2788 && opnd->imm.value != 180
2789 && opnd->imm.value != 270)
2791 set_other_error (mismatch_detail, idx,
2792 _("rotate expected to be 0, 90, 180 or 270"));
2797 case AARCH64_OPND_IMM_ROT3:
2798 case AARCH64_OPND_SVE_IMM_ROT1:
2799 case AARCH64_OPND_SVE_IMM_ROT3:
2800 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2802 set_other_error (mismatch_detail, idx,
2803 _("rotate expected to be 90 or 270"));
2808 case AARCH64_OPND_SHLL_IMM:
2810 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2811 if (opnd->imm.value != size)
2813 set_other_error (mismatch_detail, idx,
2814 _("invalid shift amount"));
2819 case AARCH64_OPND_IMM_VLSL:
2820 size = aarch64_get_qualifier_esize (qualifier);
2821 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2823 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2829 case AARCH64_OPND_IMM_VLSR:
2830 size = aarch64_get_qualifier_esize (qualifier);
2831 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2833 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2838 case AARCH64_OPND_SIMD_IMM:
2839 case AARCH64_OPND_SIMD_IMM_SFT:
2840 /* Qualifier check. */
2843 case AARCH64_OPND_QLF_LSL:
2844 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2846 set_other_error (mismatch_detail, idx,
2847 _("invalid shift operator"));
2851 case AARCH64_OPND_QLF_MSL:
2852 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2854 set_other_error (mismatch_detail, idx,
2855 _("invalid shift operator"));
2859 case AARCH64_OPND_QLF_NIL:
2860 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2862 set_other_error (mismatch_detail, idx,
2863 _("shift is not permitted"));
2871 /* Is the immediate valid? */
2873 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2875 /* uimm8 or simm8 */
2876 if (!value_in_range_p (opnd->imm.value, -128, 255))
2878 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2882 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2885 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2886 ffffffffgggggggghhhhhhhh'. */
2887 set_other_error (mismatch_detail, idx,
2888 _("invalid value for immediate"));
2891 /* Is the shift amount valid? */
2892 switch (opnd->shifter.kind)
2894 case AARCH64_MOD_LSL:
2895 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2896 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2898 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2902 if (!value_aligned_p (opnd->shifter.amount, 8))
2904 set_unaligned_error (mismatch_detail, idx, 8);
2908 case AARCH64_MOD_MSL:
2909 /* Only 8 and 16 are valid shift amount. */
2910 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2912 set_other_error (mismatch_detail, idx,
2913 _("shift amount must be 0 or 16"));
2918 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2920 set_other_error (mismatch_detail, idx,
2921 _("invalid shift operator"));
2928 case AARCH64_OPND_FPIMM:
2929 case AARCH64_OPND_SIMD_FPIMM:
2930 case AARCH64_OPND_SVE_FPIMM8:
2931 if (opnd->imm.is_fp == 0)
2933 set_other_error (mismatch_detail, idx,
2934 _("floating-point immediate expected"));
2937 /* The value is expected to be an 8-bit floating-point constant with
2938 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2939 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2941 if (!value_in_range_p (opnd->imm.value, 0, 255))
2943 set_other_error (mismatch_detail, idx,
2944 _("immediate out of range"));
2947 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2949 set_other_error (mismatch_detail, idx,
2950 _("invalid shift operator"));
2955 case AARCH64_OPND_SVE_AIMM:
2958 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2959 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2960 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2961 uvalue = opnd->imm.value;
2962 shift = opnd->shifter.amount;
2967 set_other_error (mismatch_detail, idx,
2968 _("no shift amount allowed for"
2969 " 8-bit constants"));
2975 if (shift != 0 && shift != 8)
2977 set_other_error (mismatch_detail, idx,
2978 _("shift amount must be 0 or 8"));
2981 if (shift == 0 && (uvalue & 0xff) == 0)
2984 uvalue = (int64_t) uvalue / 256;
2988 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2990 set_other_error (mismatch_detail, idx,
2991 _("immediate too big for element size"));
2994 uvalue = (uvalue - min_value) & mask;
2997 set_other_error (mismatch_detail, idx,
2998 _("invalid arithmetic immediate"));
3003 case AARCH64_OPND_SVE_ASIMM:
3007 case AARCH64_OPND_SVE_I1_HALF_ONE:
3008 assert (opnd->imm.is_fp);
3009 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
3011 set_other_error (mismatch_detail, idx,
3012 _("floating-point value must be 0.5 or 1.0"));
3017 case AARCH64_OPND_SVE_I1_HALF_TWO:
3018 assert (opnd->imm.is_fp);
3019 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
3021 set_other_error (mismatch_detail, idx,
3022 _("floating-point value must be 0.5 or 2.0"));
3027 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3028 assert (opnd->imm.is_fp);
3029 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
3031 set_other_error (mismatch_detail, idx,
3032 _("floating-point value must be 0.0 or 1.0"));
3037 case AARCH64_OPND_SVE_INV_LIMM:
3039 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
3040 uint64_t uimm = ~opnd->imm.value;
3041 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
3043 set_other_error (mismatch_detail, idx,
3044 _("immediate out of range"));
3050 case AARCH64_OPND_SVE_LIMM_MOV:
3052 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
3053 uint64_t uimm = opnd->imm.value;
3054 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
3056 set_other_error (mismatch_detail, idx,
3057 _("immediate out of range"));
3060 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
3062 set_other_error (mismatch_detail, idx,
3063 _("invalid replicated MOV immediate"));
3069 case AARCH64_OPND_SVE_PATTERN_SCALED:
3070 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
3071 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
3073 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
3078 case AARCH64_OPND_SVE_SHLIMM_PRED:
3079 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3080 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
3081 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
3082 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
3084 set_imm_out_of_range_error (mismatch_detail, idx,
3090 case AARCH64_OPND_SME_SHRIMM4:
3091 size = 1 << get_operand_fields_width (get_operand_from_code (type));
3092 if (!value_in_range_p (opnd->imm.value, 1, size))
3094 set_imm_out_of_range_error (mismatch_detail, idx, 1, size);
3099 case AARCH64_OPND_SME_SHRIMM5:
3100 case AARCH64_OPND_SVE_SHRIMM_PRED:
3101 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3102 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
3103 num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
3104 size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
3105 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
3107 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
3112 case AARCH64_OPND_SME_ZT0_INDEX:
3113 if (!value_in_range_p (opnd->imm.value, 0, 56))
3115 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, 56);
3118 if (opnd->imm.value % 8 != 0)
3120 set_other_error (mismatch_detail, idx,
3121 _("byte index must be a multiple of 8"));
3131 case AARCH64_OPND_CLASS_SYSTEM:
3134 case AARCH64_OPND_PSTATEFIELD:
3135 for (i = 0; aarch64_pstatefields[i].name; ++i)
3136 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3138 assert (aarch64_pstatefields[i].name);
3139 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
3140 max_value = F_GET_REG_MAX_VALUE (aarch64_pstatefields[i].flags);
3141 if (opnds[1].imm.value < 0 || opnds[1].imm.value > max_value)
3143 set_imm_out_of_range_error (mismatch_detail, 1, 0, max_value);
3147 case AARCH64_OPND_PRFOP:
3148 if (opcode->iclass == ldst_regoff && opnd->prfop->value >= 24)
3150 set_other_error (mismatch_detail, idx,
3151 _("the register-index form of PRFM does"
3152 " not accept opcodes in the range 24-31"));
3161 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
3162 /* Get the upper bound for the element index. */
3163 if (opcode->op == OP_FCMLA_ELEM)
3164 /* FCMLA index range depends on the vector size of other operands
3165 and is halved because complex numbers take two elements. */
3166 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
3167 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
3170 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
3171 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
3173 /* Index out-of-range. */
3174 if (!value_in_range_p (opnd->reglane.index, 0, num))
3176 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
3179 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
3180 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
3181 number is encoded in "size:M:Rm":
3187 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
3188 && !value_in_range_p (opnd->reglane.regno, 0, 15))
3190 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
3195 case AARCH64_OPND_CLASS_MODIFIED_REG:
3196 assert (idx == 1 || idx == 2);
3199 case AARCH64_OPND_Rm_EXT:
3200 if (!aarch64_extend_operator_p (opnd->shifter.kind)
3201 && opnd->shifter.kind != AARCH64_MOD_LSL)
3203 set_other_error (mismatch_detail, idx,
3204 _("extend operator expected"));
3207 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
3208 (i.e. SP), in which case it defaults to LSL. The LSL alias is
3209 only valid when "Rd" or "Rn" is '11111', and is preferred in that
3211 if (!aarch64_stack_pointer_p (opnds + 0)
3212 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
3214 if (!opnd->shifter.operator_present)
3216 set_other_error (mismatch_detail, idx,
3217 _("missing extend operator"));
3220 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
3222 set_other_error (mismatch_detail, idx,
3223 _("'LSL' operator not allowed"));
3227 assert (opnd->shifter.operator_present /* Default to LSL. */
3228 || opnd->shifter.kind == AARCH64_MOD_LSL);
3229 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
3231 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
3234 /* In the 64-bit form, the final register operand is written as Wm
3235 for all but the (possibly omitted) UXTX/LSL and SXTX
3237 N.B. GAS allows X register to be used with any operator as a
3238 programming convenience. */
3239 if (qualifier == AARCH64_OPND_QLF_X
3240 && opnd->shifter.kind != AARCH64_MOD_LSL
3241 && opnd->shifter.kind != AARCH64_MOD_UXTX
3242 && opnd->shifter.kind != AARCH64_MOD_SXTX)
3244 set_other_error (mismatch_detail, idx, _("W register expected"));
3249 case AARCH64_OPND_Rm_SFT:
3250 /* ROR is not available to the shifted register operand in
3251 arithmetic instructions. */
3252 if (!aarch64_shift_operator_p (opnd->shifter.kind))
3254 set_other_error (mismatch_detail, idx,
3255 _("shift operator expected"));
3258 if (opnd->shifter.kind == AARCH64_MOD_ROR
3259 && opcode->iclass != log_shift)
3261 set_other_error (mismatch_detail, idx,
3262 _("'ROR' operator not allowed"));
3265 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
3266 if (!value_in_range_p (opnd->shifter.amount, 0, num))
3268 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
3285 /* Main entrypoint for the operand constraint checking.
3287 Return 1 if operands of *INST meet the constraint applied by the operand
3288 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
3289 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
3290 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
3291 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
3292 error kind when it is notified that an instruction does not pass the check).
3294 Un-determined operand qualifiers may get established during the process. */
3297 aarch64_match_operands_constraint (aarch64_inst *inst,
3298 aarch64_operand_error *mismatch_detail)
3302 DEBUG_TRACE ("enter");
/* Stage 1: tied-operand checks.  TIED_OPERAND gives the index of the
   operand that must mirror operand 0; iclass-specific cases below tie
   immediates instead of registers.  */
3304 i = inst->opcode->tied_operand;
3308 /* Check for tied_operands with specific opcode iclass. */
3309 switch (inst->opcode->iclass)
3311 /* For SME LDR and STR instructions #imm must have the same numerical
3312 value for both operands.
3316 assert (inst->operands[0].type == AARCH64_OPND_SME_ZA_array_off4);
3317 assert (inst->operands[1].type == AARCH64_OPND_SME_ADDR_RI_U4xVL);
3318 if (inst->operands[0].indexed_za.index.imm
3319 != inst->operands[1].addr.offset.imm)
3321 if (mismatch_detail)
3323 mismatch_detail->kind = AARCH64_OPDE_UNTIED_IMMS;
3324 mismatch_detail->index = i;
3332 /* Check for cases where a source register needs to be the
3333 same as the destination register. Do this before
3334 matching qualifiers since if an instruction has both
3335 invalid tying and invalid qualifiers, the error about
3336 qualifiers would suggest several alternative instructions
3337 that also have invalid tying. */
3338 enum aarch64_operand_class op_class
3339 = aarch64_get_operand_class (inst->operands[0].type);
3340 assert (aarch64_get_operand_class (inst->operands[i].type)
/* Register lists are tied by (first_regno, num_regs, stride); plain
   registers just by register number.  */
3342 if (op_class == AARCH64_OPND_CLASS_SVE_REGLIST
3343 ? ((inst->operands[0].reglist.first_regno
3344 != inst->operands[i].reglist.first_regno)
3345 || (inst->operands[0].reglist.num_regs
3346 != inst->operands[i].reglist.num_regs)
3347 || (inst->operands[0].reglist.stride
3348 != inst->operands[i].reglist.stride))
3349 : (inst->operands[0].reg.regno
3350 != inst->operands[i].reg.regno))
3352 if (mismatch_detail)
3354 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
3355 mismatch_detail->index = i;
3356 mismatch_detail->error = NULL;
/* Stage 2: qualifier matching (may establish qualifiers in *INST).  */
3365 /* Match operands' qualifier.
3366 *INST has already had qualifiers established for some, if not all, of
3367 its operands; we need to find out whether these established
3368 qualifiers match one of the qualifier sequence in
3369 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
3370 with the corresponding qualifier in such a sequence.
3371 Only basic operand constraint checking is done here; the more thorough
3372 constraint checking will be carried out by operand_general_constraint_met_p,
3373 which has to be called after this in order to get all of the operands'
3374 qualifiers established. */
3376 if (match_operands_qualifier (inst, true /* update_p */,
3377 &invalid_count) == 0)
3379 DEBUG_TRACE ("FAIL on operand qualifier matching");
3380 if (mismatch_detail)
3382 /* Return an error type to indicate that it is the qualifier
3383 matching failure; we don't care about which operand as there
3384 is enough information in the opcode table to reproduce it. */
3385 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
3386 mismatch_detail->index = -1;
3387 mismatch_detail->error = NULL;
3388 mismatch_detail->data[0].i = invalid_count;
/* Stage 3: per-operand general constraint checks, now that every
   qualifier is established.  Operands marked SKIP are incomplete and
   are not checked.  */
3393 /* Match operands' constraint. */
3394 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3396 enum aarch64_opnd type = inst->opcode->operands[i];
3397 if (type == AARCH64_OPND_NIL)
3399 if (inst->operands[i].skip)
3401 DEBUG_TRACE ("skip the incomplete operand %d", i);
3404 if (operand_general_constraint_met_p (inst->operands, i, type,
3405 inst->opcode, mismatch_detail) == 0)
3407 DEBUG_TRACE ("FAIL on operand %d", i);
3412 DEBUG_TRACE ("PASS");
3417 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
3418 Also updates the TYPE of each INST->OPERANDS with the corresponding
3419 value of OPCODE->OPERANDS.
3421 Note that some operand qualifiers may need to be manually cleared by
3422 the caller before it further calls the aarch64_opcode_encode; by
3423 doing this, it helps the qualifier matching facilities work
3426 const aarch64_opcode*
3427 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
/* Remember the opcode being replaced so it can be handed back to the
   caller (per the function's header comment).  */
3430 const aarch64_opcode *old = inst->opcode;
3432 inst->opcode = opcode;
3434 /* Update the operand types. */
3435 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3437 inst->operands[i].type = opcode->operands[i];
/* AARCH64_OPND_NIL terminates the operand list; nothing meaningful
   follows it in OPCODE->OPERANDS.  */
3438 if (opcode->operands[i] == AARCH64_OPND_NIL)
3442 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
/* Return the index of OPERAND within the OPERANDS array, searching up to
   AARCH64_MAX_OPND_NUM entries and stopping early at the
   AARCH64_OPND_NIL terminator.  NOTE(review): the return statements are
   elided in this dump; presumably -1 is returned when OPERAND is not
   found -- confirm against the full source.  */
3448 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
3451 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3452 if (operands[i] == operand)
3454 else if (operands[i] == AARCH64_OPND_NIL)
3459 /* R0...R30, followed by FOR31. */
3460 #define BANK(R, FOR31) \
3461 { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
3462 R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
3463 R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
3464 R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
3465 /* [0][0] 32-bit integer regs with sp Wn
3466 [0][1] 64-bit integer regs with sp Xn sf=1
3467 [1][0] 32-bit integer regs with #0 Wn
3468 [1][1] 64-bit integer regs with #0 Xn sf=1 */
3469 static const char *int_reg[2][2][32] = {
3470 #define R32(X) "w" #X
3471 #define R64(X) "x" #X
/* Only entry 31 differs between the two banks: the SP bank names it
   "wsp"/"sp", the zero-register bank names it "wzr"/"xzr".  */
3472 { BANK (R32, "wsp"), BANK (R64, "sp") },
3473 { BANK (R32, "wzr"), BANK (R64, "xzr") }
3478 /* Names of the SVE vector registers, first with .S suffixes,
3479 then with .D suffixes. */
3481 static const char *sve_reg[2][32] = {
3482 #define ZS(X) "z" #X ".s"
3483 #define ZD(X) "z" #X ".d"
/* Index [0] = z0.s..z31.s, index [1] = z0.d..z31.d.  */
3484 BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
3490 /* Return the integer register name.
3491 if SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg. */
3493 static inline const char *
3494 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
3496 const int has_zr = sp_reg_p ? 0 : 1;
3497 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
3498 return int_reg[has_zr][is_64][regno];
3501 /* Like get_int_reg_name, but IS_64 is always 1. */
3503 static inline const char *
3504 get_64bit_int_reg_name (int regno, int sp_reg_p)
3506 const int has_zr = sp_reg_p ? 0 : 1;
3507 return int_reg[has_zr][1][regno];
3510 /* Get the name of the integer offset register in OPND, using the shift type
3511 to decide whether it's a word or doubleword. */
3513 static inline const char *
3514 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
3516 switch (opnd->shifter.kind)
/* W-extend operators take a 32-bit (Wn) index register.  */
3518 case AARCH64_MOD_UXTW:
3519 case AARCH64_MOD_SXTW:
3520 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
/* LSL and SXTX take a 64-bit (Xn) index register.  */
3522 case AARCH64_MOD_LSL:
3523 case AARCH64_MOD_SXTX:
3524 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
3531 /* Get the name of the SVE vector offset register in OPND, using the operand
3532 qualifier to decide whether the suffix should be .S or .D. */
3534 static inline const char *
3535 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
3537 assert (qualifier == AARCH64_OPND_QLF_S_S
3538 || qualifier == AARCH64_OPND_QLF_S_D);
3539 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
3542 /* Types for expanding an encoded 8-bit value to a floating-point value. */
3562 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
3563 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
3564 (depending on the type of the instruction). IMM8 will be expanded to a
3565 single-precision floating-point value (SIZE == 4) or a double-precision
3566 floating-point value (SIZE == 8). A half-precision floating-point value
3567 (SIZE == 2) is expanded to a single-precision floating-point value. The
3568 expanded value is returned. */
static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t result = 0;
  const uint32_t sign = (imm8 >> 7) & 0x01;	/* imm8<7>.  */
  const uint32_t low7 = imm8 & 0x7f;		/* imm8<6:0>.  */
  const uint32_t bit6 = low7 >> 6;		/* imm8<6>.  */
  const uint32_t bit6_x4 = bit6 ? 0xf : 0;	/* Replicate(imm8<6>,4).  */

  if (size == 8)
    {
      /* Assemble the high word of the double, then shift it into
	 place; the low 32 bits of the significand are zero.  */
      result = ((uint64_t) sign << (63 - 32))		/* imm8<7>.  */
	| ((uint64_t) (bit6 ^ 1) << (62 - 32))		/* NOT(imm8<6>).  */
	| ((uint64_t) bit6_x4 << (58 - 32))
	| ((uint64_t) bit6 << (57 - 32))
	| ((uint64_t) bit6 << (56 - 32))
	| ((uint64_t) bit6 << (55 - 32))		/* Replicate(imm8<6>,7).  */
	| ((uint64_t) low7 << (48 - 32));		/* imm8<6>:imm8<5:0>.  */
      result <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      /* Half-precision constants are also returned expanded to the
	 single-precision layout.  */
      result = ((uint64_t) sign << 31)			/* imm8<7>.  */
	| ((uint64_t) (bit6 ^ 1) << 30)			/* NOT(imm8<6>).  */
	| ((uint64_t) bit6_x4 << 26)			/* Replicate(imm8<6>,4).  */
	| ((uint64_t) low7 << 19);			/* imm8<6>:imm8<5:0>.  */
    }
  else
    {
      /* An unsupported size.  */
      abort ();
    }

  return result;
}
3606 /* Return a string based on FMT with the register style applied. */
3609 style_reg (struct aarch64_styler *styler, const char *fmt, ...)
/* Forwards FMT and the trailing varargs to the styler callback with
   dis_style_register.  NOTE(review): the returned string's storage is
   presumably owned by the styler -- verify against apply_style.  */
3615 txt = styler->apply_style (styler, dis_style_register, fmt, ap);
3621 /* Return a string based on FMT with the immediate style applied. */
3624 style_imm (struct aarch64_styler *styler, const char *fmt, ...)
/* Same shape as style_reg, but tags the text as dis_style_immediate.  */
3630 txt = styler->apply_style (styler, dis_style_immediate, fmt, ap);
3636 /* Return a string based on FMT with the sub-mnemonic style applied. */
3639 style_sub_mnem (struct aarch64_styler *styler, const char *fmt, ...)
/* Same shape as style_reg, but tags the text as dis_style_sub_mnemonic.  */
3645 txt = styler->apply_style (styler, dis_style_sub_mnemonic, fmt, ap);
3651 /* Return a string based on FMT with the address style applied. */
3654 style_addr (struct aarch64_styler *styler, const char *fmt, ...)
/* Same shape as style_reg, but tags the text as dis_style_address.  */
3660 txt = styler->apply_style (styler, dis_style_address, fmt, ap);
3666 /* Produce the string representation of the register list operand *OPND
3667 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
3668 the register name that comes before the register number, such as "v". */
3670 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
3671 const char *prefix, struct aarch64_styler *styler)
/* Predicate registers ('p' prefix) number 0-15; all other register
   files handled here number 0-31, so wrap indices accordingly.  */
3673 const int mask = (prefix[0] == 'p' ? 15 : 31);
3674 const int num_regs = opnd->reglist.num_regs;
3675 const int stride = opnd->reglist.stride;
3676 const int first_reg = opnd->reglist.first_regno;
/* Register numbers wrap around modulo the register-file size.  */
3677 const int last_reg = (first_reg + (num_regs - 1) * stride) & mask;
3678 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
3679 char tb[16]; /* Temporary buffer. */
3681 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
3682 assert (num_regs >= 1 && num_regs <= 4);
3684 /* Prepare the index if any. */
3685 if (opnd->reglist.has_index)
3686 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3687 snprintf (tb, sizeof (tb), "[%s]",
3688 style_imm (styler, "%" PRIi64, (opnd->reglist.index % 100)));
3692 /* The hyphenated form is preferred for disassembly if there are
3693 more than two registers in the list, and the register numbers
3694 are monotonically increasing in increments of one. */
/* SME_Zt2/Zt3/Zt4 lists are always printed in the expanded
   comma-separated form, never hyphenated.  */
3695 if (stride == 1 && num_regs > 1
3696 && ((opnd->type != AARCH64_OPND_SME_Zt2)
3697 && (opnd->type != AARCH64_OPND_SME_Zt3)
3698 && (opnd->type != AARCH64_OPND_SME_Zt4)))
3699 snprintf (buf, size, "{%s-%s}%s",
3700 style_reg (styler, "%s%d.%s", prefix, first_reg, qlf_name),
3701 style_reg (styler, "%s%d.%s", prefix, last_reg, qlf_name), tb);
/* Otherwise list each register explicitly; compute all four possible
   members up front (only the first NUM_REGS are used below).  */
3704 const int reg0 = first_reg;
3705 const int reg1 = (first_reg + stride) & mask;
3706 const int reg2 = (first_reg + stride * 2) & mask;
3707 const int reg3 = (first_reg + stride * 3) & mask;
3712 snprintf (buf, size, "{%s}%s",
3713 style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
3717 snprintf (buf, size, "{%s, %s}%s",
3718 style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
3719 style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
3723 snprintf (buf, size, "{%s, %s, %s}%s",
3724 style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
3725 style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
3726 style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
3730 snprintf (buf, size, "{%s, %s, %s, %s}%s",
3731 style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
3732 style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
3733 style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
3734 style_reg (styler, "%s%d.%s", prefix, reg3, qlf_name),
3741 /* Print the register+immediate address in OPND to BUF, which has SIZE
3742 characters. BASE is the name of the base register. */
3745 print_immediate_offset_address (char *buf, size_t size,
3746 const aarch64_opnd_info *opnd,
3748 struct aarch64_styler *styler)
/* Writeback addressing: pre-indexed prints "[base, #imm]!", post-indexed
   prints "[base], #imm".  */
3750 if (opnd->addr.writeback)
3752 if (opnd->addr.preind)
/* ADDR_SIMM10 with a zero offset omits the immediate: "[base]!".  */
3754 if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
3755 snprintf (buf, size, "[%s]!", style_reg (styler, base));
3757 snprintf (buf, size, "[%s, %s]!",
3758 style_reg (styler, base),
3759 style_imm (styler, "#%d", opnd->addr.offset.imm));
3762 snprintf (buf, size, "[%s], %s",
3763 style_reg (styler, base),
3764 style_imm (styler, "#%d", opnd->addr.offset.imm));
/* Non-writeback addressing.  An explicit shifter here can only be
   MUL VL (SVE vector-length-scaled offsets): "[base, #imm, mul vl]".  */
3768 if (opnd->shifter.operator_present)
3770 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3771 snprintf (buf, size, "[%s, %s, %s]",
3772 style_reg (styler, base),
3773 style_imm (styler, "#%d", opnd->addr.offset.imm),
3774 style_sub_mnem (styler, "mul vl"));
/* A non-zero plain immediate prints "[base, #imm]"; a zero offset is
   omitted entirely, printing just "[base]".  */
3776 else if (opnd->addr.offset.imm)
3777 snprintf (buf, size, "[%s, %s]",
3778 style_reg (styler, base),
3779 style_imm (styler, "#%d", opnd->addr.offset.imm));
3781 snprintf (buf, size, "[%s]", style_reg (styler, base));
3785 /* Produce the string representation of the register offset address operand
3786 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
3787 the names of the base and offset registers. */
3789 print_register_offset_address (char *buf, size_t size,
3790 const aarch64_opnd_info *opnd,
3791 const char *base, const char *offset,
3792 struct aarch64_styler *styler)
3794 char tb[32]; /* Temporary buffer. */
/* Whether to print the extend/shift operator and its amount; both may be
   suppressed below so the shortest canonical form is produced.  */
3795 bool print_extend_p = true;
3796 bool print_amount_p = true;
3797 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
/* A zero amount is omitted, except for the 8-bit (QLF_S_B) load/store
   case where an explicitly-present amount must still be printed.  */
3799 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3800 || !opnd->shifter.amount_present))
3802 /* Not print the shift/extend amount when the amount is zero and
3803 when it is not the special case of 8-bit load/store instruction. */
3804 print_amount_p = false;
3805 /* Likewise, no need to print the shift operator LSL in such a
3807 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3808 print_extend_p = false;
3811 /* Prepare for the extend/shift. */
/* Pre-format the ", <extend> #amount" (or ", <extend>") suffix into TB.
   NOTE(review): the branches that select between these two snprintf
   calls -- and the one that leaves TB empty when neither operator nor
   amount is printed -- are elided in this extract.  */
3815 snprintf (tb, sizeof (tb), ", %s %s",
3816 style_sub_mnem (styler, shift_name),
3817 style_imm (styler, "#%" PRIi64,
3818 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3819 (opnd->shifter.amount % 100)));
3821 snprintf (tb, sizeof (tb), ", %s",
3822 style_sub_mnem (styler, shift_name));
/* Final form: "[base, offset<tb>]".  */
3827 snprintf (buf, size, "[%s, %s%s]", style_reg (styler, base),
3828 style_reg (styler, offset), tb);
3831 /* Print ZA tiles from imm8 in ZERO instruction.
3833 The preferred disassembly of this instruction uses the shortest list of tile
3834 names that represent the encoded immediate mask.
3837 * An all-ones immediate is disassembled as {ZA}.
3838 * An all-zeros immediate is disassembled as an empty list { }.
3841 print_sme_za_list (char *buf, size_t size, int mask,
3842 struct aarch64_styler *styler)
/* Tile names paired index-for-index with the bit patterns in ZAN_V below;
   ordered from the widest covering name ("za" = all eight bits) down to
   single .d tiles so the greedy scan yields the shortest list.  */
3844 const char* zan[] = { "za", "za0.h", "za1.h", "za0.s",
3845 "za1.s", "za2.s", "za3.s", "za0.d",
3846 "za1.d", "za2.d", "za3.d", "za4.d",
3847 "za5.d", "za6.d", "za7.d", " " };
/* Bit pattern that each tile name in ZAN covers within the imm8 mask.  */
3848 const int zan_v[] = { 0xff, 0x55, 0xaa, 0x11,
3849 0x22, 0x44, 0x88, 0x01,
3850 0x02, 0x04, 0x08, 0x10,
3851 0x20, 0x40, 0x80, 0x00 };
3853 const int ZAN_SIZE = sizeof(zan) / sizeof(zan[0]);
/* K tracks the number of characters written so far; the opening brace is
   emitted first and each matched tile name is appended, comma-separated.
   NOTE(review): the declarations of I and K, the braces, and the lines
   that clear matched bits from MASK and decide when to emit the comma
   separator are elided in this extract -- confirm in the full file.  */
3855 k = snprintf (buf, size, "{");
3856 for (i = 0; i < ZAN_SIZE; i++)
/* A tile name is printed only when every bit it covers is set in MASK.  */
3858 if ((mask & zan_v[i]) == zan_v[i])
3862 k += snprintf (buf + k, size - k, ", ");
3864 k += snprintf (buf + k, size - k, "%s", style_reg (styler, zan[i]));
3869 snprintf (buf + k, size - k, "}");
3872 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3873 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3874 PC, PCREL_P and ADDRESS are used to pass in and return information about
3875 the PC-relative address calculation, where the PC value is passed in
3876 PC. If the operand is PC-relative, *PCREL_P (if PCREL_P non-NULL)
3877 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3878 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3880 The function serves both the disassembler and the assembler diagnostics
3881 issuer, which is the reason why it lives in this file. */
3884 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3885 const aarch64_opcode *opcode,
3886 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3887 bfd_vma *address, char** notes,
3888 char *comment, size_t comment_size,
3889 aarch64_feature_set features,
3890 struct aarch64_styler *styler)
3892 unsigned int i, num_conds;
3893 const char *name = NULL;
3894 const aarch64_opnd_info *opnd = opnds + idx;
3895 enum aarch64_modifier_kind kind;
3896 uint64_t addr, enum_value;
3898 if (comment != NULL)
3900 assert (comment_size > 0);
3904 assert (comment_size == 0);
3912 case AARCH64_OPND_Rd:
3913 case AARCH64_OPND_Rn:
3914 case AARCH64_OPND_Rm:
3915 case AARCH64_OPND_Rt:
3916 case AARCH64_OPND_Rt2:
3917 case AARCH64_OPND_Rs:
3918 case AARCH64_OPND_Ra:
3919 case AARCH64_OPND_Rt_LS64:
3920 case AARCH64_OPND_Rt_SYS:
3921 case AARCH64_OPND_PAIRREG:
3922 case AARCH64_OPND_PAIRREG_OR_XZR:
3923 case AARCH64_OPND_SVE_Rm:
3924 case AARCH64_OPND_LSE128_Rt:
3925 case AARCH64_OPND_LSE128_Rt2:
3926 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3927 the <ic_op>, therefore we use opnd->present to override the
3928 generic optional-ness information. */
3929 if (opnd->type == AARCH64_OPND_Rt_SYS)
3934 /* Omit the operand, e.g. RET. */
3935 else if (optional_operand_p (opcode, idx)
3937 == get_optional_operand_default_value (opcode)))
3939 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3940 || opnd->qualifier == AARCH64_OPND_QLF_X);
3941 snprintf (buf, size, "%s",
3942 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3943 opnd->qualifier, 0)));
3946 case AARCH64_OPND_Rd_SP:
3947 case AARCH64_OPND_Rn_SP:
3948 case AARCH64_OPND_Rt_SP:
3949 case AARCH64_OPND_SVE_Rn_SP:
3950 case AARCH64_OPND_Rm_SP:
3951 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3952 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3953 || opnd->qualifier == AARCH64_OPND_QLF_X
3954 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3955 snprintf (buf, size, "%s",
3956 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3957 opnd->qualifier, 1)));
3960 case AARCH64_OPND_Rm_EXT:
3961 kind = opnd->shifter.kind;
3962 assert (idx == 1 || idx == 2);
3963 if ((aarch64_stack_pointer_p (opnds)
3964 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3965 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3966 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3967 && kind == AARCH64_MOD_UXTW)
3968 || (opnd->qualifier == AARCH64_OPND_QLF_X
3969 && kind == AARCH64_MOD_UXTX)))
3971 /* 'LSL' is the preferred form in this case. */
3972 kind = AARCH64_MOD_LSL;
3973 if (opnd->shifter.amount == 0)
3975 /* Shifter omitted. */
3976 snprintf (buf, size, "%s",
3978 get_int_reg_name (opnd->reg.regno,
3979 opnd->qualifier, 0)));
3983 if (opnd->shifter.amount)
3984 snprintf (buf, size, "%s, %s %s",
3985 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3986 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name),
3987 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3989 snprintf (buf, size, "%s, %s",
3990 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3991 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name));
3994 case AARCH64_OPND_Rm_SFT:
3995 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3996 || opnd->qualifier == AARCH64_OPND_QLF_X);
3997 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3998 snprintf (buf, size, "%s",
3999 style_reg (styler, get_int_reg_name (opnd->reg.regno,
4000 opnd->qualifier, 0)));
4002 snprintf (buf, size, "%s, %s %s",
4003 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
4004 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
4005 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4008 case AARCH64_OPND_Fd:
4009 case AARCH64_OPND_Fn:
4010 case AARCH64_OPND_Fm:
4011 case AARCH64_OPND_Fa:
4012 case AARCH64_OPND_Ft:
4013 case AARCH64_OPND_Ft2:
4014 case AARCH64_OPND_Sd:
4015 case AARCH64_OPND_Sn:
4016 case AARCH64_OPND_Sm:
4017 case AARCH64_OPND_SVE_VZn:
4018 case AARCH64_OPND_SVE_Vd:
4019 case AARCH64_OPND_SVE_Vm:
4020 case AARCH64_OPND_SVE_Vn:
4021 snprintf (buf, size, "%s",
4022 style_reg (styler, "%s%d",
4023 aarch64_get_qualifier_name (opnd->qualifier),
4027 case AARCH64_OPND_Va:
4028 case AARCH64_OPND_Vd:
4029 case AARCH64_OPND_Vn:
4030 case AARCH64_OPND_Vm:
4031 snprintf (buf, size, "%s",
4032 style_reg (styler, "v%d.%s", opnd->reg.regno,
4033 aarch64_get_qualifier_name (opnd->qualifier)));
4036 case AARCH64_OPND_Ed:
4037 case AARCH64_OPND_En:
4038 case AARCH64_OPND_Em:
4039 case AARCH64_OPND_Em16:
4040 case AARCH64_OPND_SM3_IMM2:
4041 snprintf (buf, size, "%s[%s]",
4042 style_reg (styler, "v%d.%s", opnd->reglane.regno,
4043 aarch64_get_qualifier_name (opnd->qualifier)),
4044 style_imm (styler, "%" PRIi64, opnd->reglane.index));
4047 case AARCH64_OPND_VdD1:
4048 case AARCH64_OPND_VnD1:
4049 snprintf (buf, size, "%s[%s]",
4050 style_reg (styler, "v%d.d", opnd->reg.regno),
4051 style_imm (styler, "1"));
4054 case AARCH64_OPND_LVn:
4055 case AARCH64_OPND_LVt:
4056 case AARCH64_OPND_LVt_AL:
4057 case AARCH64_OPND_LEt:
4058 print_register_list (buf, size, opnd, "v", styler);
4061 case AARCH64_OPND_SVE_Pd:
4062 case AARCH64_OPND_SVE_Pg3:
4063 case AARCH64_OPND_SVE_Pg4_5:
4064 case AARCH64_OPND_SVE_Pg4_10:
4065 case AARCH64_OPND_SVE_Pg4_16:
4066 case AARCH64_OPND_SVE_Pm:
4067 case AARCH64_OPND_SVE_Pn:
4068 case AARCH64_OPND_SVE_Pt:
4069 case AARCH64_OPND_SME_Pm:
4070 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
4071 snprintf (buf, size, "%s",
4072 style_reg (styler, "p%d", opnd->reg.regno));
4073 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
4074 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
4075 snprintf (buf, size, "%s",
4076 style_reg (styler, "p%d/%s", opnd->reg.regno,
4077 aarch64_get_qualifier_name (opnd->qualifier)));
4079 snprintf (buf, size, "%s",
4080 style_reg (styler, "p%d.%s", opnd->reg.regno,
4081 aarch64_get_qualifier_name (opnd->qualifier)));
4084 case AARCH64_OPND_SVE_PNd:
4085 case AARCH64_OPND_SVE_PNg4_10:
4086 case AARCH64_OPND_SVE_PNn:
4087 case AARCH64_OPND_SVE_PNt:
4088 case AARCH64_OPND_SME_PNd3:
4089 case AARCH64_OPND_SME_PNg3:
4090 case AARCH64_OPND_SME_PNn:
4091 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
4092 snprintf (buf, size, "%s",
4093 style_reg (styler, "pn%d", opnd->reg.regno));
4094 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
4095 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
4096 snprintf (buf, size, "%s",
4097 style_reg (styler, "pn%d/%s", opnd->reg.regno,
4098 aarch64_get_qualifier_name (opnd->qualifier)));
4100 snprintf (buf, size, "%s",
4101 style_reg (styler, "pn%d.%s", opnd->reg.regno,
4102 aarch64_get_qualifier_name (opnd->qualifier)));
4105 case AARCH64_OPND_SME_Pdx2:
4106 case AARCH64_OPND_SME_PdxN:
4107 print_register_list (buf, size, opnd, "p", styler);
4110 case AARCH64_OPND_SME_PNn3_INDEX1:
4111 case AARCH64_OPND_SME_PNn3_INDEX2:
4112 snprintf (buf, size, "%s[%s]",
4113 style_reg (styler, "pn%d", opnd->reglane.regno),
4114 style_imm (styler, "%" PRIi64, opnd->reglane.index));
4117 case AARCH64_OPND_SVE_Za_5:
4118 case AARCH64_OPND_SVE_Za_16:
4119 case AARCH64_OPND_SVE_Zd:
4120 case AARCH64_OPND_SVE_Zm_5:
4121 case AARCH64_OPND_SVE_Zm_16:
4122 case AARCH64_OPND_SVE_Zn:
4123 case AARCH64_OPND_SVE_Zt:
4124 case AARCH64_OPND_SME_Zm:
4125 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
4126 snprintf (buf, size, "%s", style_reg (styler, "z%d", opnd->reg.regno));
4128 snprintf (buf, size, "%s",
4129 style_reg (styler, "z%d.%s", opnd->reg.regno,
4130 aarch64_get_qualifier_name (opnd->qualifier)));
4133 case AARCH64_OPND_SVE_ZnxN:
4134 case AARCH64_OPND_SVE_ZtxN:
4135 case AARCH64_OPND_SME_Zdnx2:
4136 case AARCH64_OPND_SME_Zdnx4:
4137 case AARCH64_OPND_SME_Zmx2:
4138 case AARCH64_OPND_SME_Zmx4:
4139 case AARCH64_OPND_SME_Znx2:
4140 case AARCH64_OPND_SME_Znx4:
4141 case AARCH64_OPND_SME_Ztx2_STRIDED:
4142 case AARCH64_OPND_SME_Ztx4_STRIDED:
4143 case AARCH64_OPND_SME_Zt2:
4144 case AARCH64_OPND_SME_Zt3:
4145 case AARCH64_OPND_SME_Zt4:
4146 print_register_list (buf, size, opnd, "z", styler);
4149 case AARCH64_OPND_SVE_Zm3_INDEX:
4150 case AARCH64_OPND_SVE_Zm3_22_INDEX:
4151 case AARCH64_OPND_SVE_Zm3_19_INDEX:
4152 case AARCH64_OPND_SVE_Zm3_11_INDEX:
4153 case AARCH64_OPND_SVE_Zm4_11_INDEX:
4154 case AARCH64_OPND_SVE_Zm4_INDEX:
4155 case AARCH64_OPND_SVE_Zn_INDEX:
4156 case AARCH64_OPND_SME_Zm_INDEX1:
4157 case AARCH64_OPND_SME_Zm_INDEX2:
4158 case AARCH64_OPND_SME_Zm_INDEX3_1:
4159 case AARCH64_OPND_SME_Zm_INDEX3_2:
4160 case AARCH64_OPND_SME_Zm_INDEX3_10:
4161 case AARCH64_OPND_SVE_Zn_5_INDEX:
4162 case AARCH64_OPND_SME_Zm_INDEX4_1:
4163 case AARCH64_OPND_SME_Zm_INDEX4_10:
4164 case AARCH64_OPND_SME_Zn_INDEX1_16:
4165 case AARCH64_OPND_SME_Zn_INDEX2_15:
4166 case AARCH64_OPND_SME_Zn_INDEX2_16:
4167 case AARCH64_OPND_SME_Zn_INDEX3_14:
4168 case AARCH64_OPND_SME_Zn_INDEX3_15:
4169 case AARCH64_OPND_SME_Zn_INDEX4_14:
4170 case AARCH64_OPND_SVE_Zm_imm4:
4171 snprintf (buf, size, "%s[%s]",
4172 (opnd->qualifier == AARCH64_OPND_QLF_NIL
4173 ? style_reg (styler, "z%d", opnd->reglane.regno)
4174 : style_reg (styler, "z%d.%s", opnd->reglane.regno,
4175 aarch64_get_qualifier_name (opnd->qualifier))),
4176 style_imm (styler, "%" PRIi64, opnd->reglane.index));
4179 case AARCH64_OPND_SME_ZAda_2b:
4180 case AARCH64_OPND_SME_ZAda_3b:
4181 snprintf (buf, size, "%s",
4182 style_reg (styler, "za%d.%s", opnd->reg.regno,
4183 aarch64_get_qualifier_name (opnd->qualifier)));
4186 case AARCH64_OPND_SME_ZA_HV_idx_src:
4187 case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
4188 case AARCH64_OPND_SME_ZA_HV_idx_dest:
4189 case AARCH64_OPND_SME_ZA_HV_idx_destxN:
4190 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
4191 snprintf (buf, size, "%s%s[%s, %s%s%s%s%s]%s",
4192 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "{" : "",
4193 style_reg (styler, "za%d%c.%s",
4194 opnd->indexed_za.regno,
4195 opnd->indexed_za.v == 1 ? 'v' : 'h',
4196 aarch64_get_qualifier_name (opnd->qualifier)),
4197 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4198 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
4199 opnd->indexed_za.index.countm1 ? ":" : "",
4200 (opnd->indexed_za.index.countm1
4201 ? style_imm (styler, "%d",
4202 opnd->indexed_za.index.imm
4203 + opnd->indexed_za.index.countm1)
4205 opnd->indexed_za.group_size ? ", " : "",
4206 opnd->indexed_za.group_size == 2
4207 ? style_sub_mnem (styler, "vgx2")
4208 : opnd->indexed_za.group_size == 4
4209 ? style_sub_mnem (styler, "vgx4") : "",
4210 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "}" : "");
4213 case AARCH64_OPND_SME_list_of_64bit_tiles:
4214 print_sme_za_list (buf, size, opnd->reg.regno, styler);
4217 case AARCH64_OPND_SME_ZA_array_off1x4:
4218 case AARCH64_OPND_SME_ZA_array_off2x2:
4219 case AARCH64_OPND_SME_ZA_array_off2x4:
4220 case AARCH64_OPND_SME_ZA_array_off3_0:
4221 case AARCH64_OPND_SME_ZA_array_off3_5:
4222 case AARCH64_OPND_SME_ZA_array_off3x2:
4223 case AARCH64_OPND_SME_ZA_array_off4:
4224 snprintf (buf, size, "%s[%s, %s%s%s%s%s]",
4225 style_reg (styler, "za%s%s",
4226 opnd->qualifier == AARCH64_OPND_QLF_NIL ? "" : ".",
4227 (opnd->qualifier == AARCH64_OPND_QLF_NIL
4229 : aarch64_get_qualifier_name (opnd->qualifier))),
4230 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4231 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
4232 opnd->indexed_za.index.countm1 ? ":" : "",
4233 (opnd->indexed_za.index.countm1
4234 ? style_imm (styler, "%d",
4235 opnd->indexed_za.index.imm
4236 + opnd->indexed_za.index.countm1)
4238 opnd->indexed_za.group_size ? ", " : "",
4239 opnd->indexed_za.group_size == 2
4240 ? style_sub_mnem (styler, "vgx2")
4241 : opnd->indexed_za.group_size == 4
4242 ? style_sub_mnem (styler, "vgx4") : "");
4245 case AARCH64_OPND_SME_ZA_array_vrsb_1:
4246 case AARCH64_OPND_SME_ZA_array_vrsh_1:
4247 case AARCH64_OPND_SME_ZA_array_vrss_1:
4248 case AARCH64_OPND_SME_ZA_array_vrsd_1:
4249 case AARCH64_OPND_SME_ZA_array_vrsb_2:
4250 case AARCH64_OPND_SME_ZA_array_vrsh_2:
4251 case AARCH64_OPND_SME_ZA_array_vrss_2:
4252 case AARCH64_OPND_SME_ZA_array_vrsd_2:
4253 snprintf (buf, size, "%s [%s, %s%s%s]",
4254 style_reg (styler, "za%d%c%s%s",
4255 opnd->indexed_za.regno,
4256 opnd->indexed_za.v ? 'v': 'h',
4257 opnd->qualifier == AARCH64_OPND_QLF_NIL ? "" : ".",
4258 (opnd->qualifier == AARCH64_OPND_QLF_NIL
4260 : aarch64_get_qualifier_name (opnd->qualifier))),
4261 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4262 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
4263 opnd->indexed_za.index.countm1 ? ":" : "",
4264 opnd->indexed_za.index.countm1 ? style_imm (styler, "%d",
4265 opnd->indexed_za.index.imm
4266 + opnd->indexed_za.index.countm1):"");
4269 case AARCH64_OPND_SME_SM_ZA:
4270 snprintf (buf, size, "%s",
4271 style_reg (styler, opnd->reg.regno == 's' ? "sm" : "za"));
4274 case AARCH64_OPND_SME_PnT_Wm_imm:
4275 snprintf (buf, size, "%s[%s, %s]",
4276 style_reg (styler, "p%d.%s", opnd->indexed_za.regno,
4277 aarch64_get_qualifier_name (opnd->qualifier)),
4278 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4279 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm));
4282 case AARCH64_OPND_SME_VLxN_10:
4283 case AARCH64_OPND_SME_VLxN_13:
4284 enum_value = opnd->imm.value;
4285 assert (enum_value < ARRAY_SIZE (aarch64_sme_vlxn_array));
4286 snprintf (buf, size, "%s",
4287 style_sub_mnem (styler, aarch64_sme_vlxn_array[enum_value]));
4290 case AARCH64_OPND_CRn:
4291 case AARCH64_OPND_CRm:
4292 snprintf (buf, size, "%s",
4293 style_reg (styler, "C%" PRIi64, opnd->imm.value));
4296 case AARCH64_OPND_IDX:
4297 case AARCH64_OPND_MASK:
4298 case AARCH64_OPND_IMM:
4299 case AARCH64_OPND_IMM_2:
4300 case AARCH64_OPND_WIDTH:
4301 case AARCH64_OPND_UIMM3_OP1:
4302 case AARCH64_OPND_UIMM3_OP2:
4303 case AARCH64_OPND_BIT_NUM:
4304 case AARCH64_OPND_IMM_VLSL:
4305 case AARCH64_OPND_IMM_VLSR:
4306 case AARCH64_OPND_SHLL_IMM:
4307 case AARCH64_OPND_IMM0:
4308 case AARCH64_OPND_IMMR:
4309 case AARCH64_OPND_IMMS:
4310 case AARCH64_OPND_UNDEFINED:
4311 case AARCH64_OPND_FBITS:
4312 case AARCH64_OPND_TME_UIMM16:
4313 case AARCH64_OPND_SIMM5:
4314 case AARCH64_OPND_SME_SHRIMM4:
4315 case AARCH64_OPND_SME_SHRIMM5:
4316 case AARCH64_OPND_SVE_SHLIMM_PRED:
4317 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
4318 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
4319 case AARCH64_OPND_SVE_SHRIMM_PRED:
4320 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
4321 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
4322 case AARCH64_OPND_SVE_SIMM5:
4323 case AARCH64_OPND_SVE_SIMM5B:
4324 case AARCH64_OPND_SVE_SIMM6:
4325 case AARCH64_OPND_SVE_SIMM8:
4326 case AARCH64_OPND_SVE_UIMM3:
4327 case AARCH64_OPND_SVE_UIMM7:
4328 case AARCH64_OPND_SVE_UIMM8:
4329 case AARCH64_OPND_SVE_UIMM8_53:
4330 case AARCH64_OPND_IMM_ROT1:
4331 case AARCH64_OPND_IMM_ROT2:
4332 case AARCH64_OPND_IMM_ROT3:
4333 case AARCH64_OPND_SVE_IMM_ROT1:
4334 case AARCH64_OPND_SVE_IMM_ROT2:
4335 case AARCH64_OPND_SVE_IMM_ROT3:
4336 case AARCH64_OPND_CSSC_SIMM8:
4337 case AARCH64_OPND_CSSC_UIMM8:
4338 snprintf (buf, size, "%s",
4339 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4342 case AARCH64_OPND_SVE_I1_HALF_ONE:
4343 case AARCH64_OPND_SVE_I1_HALF_TWO:
4344 case AARCH64_OPND_SVE_I1_ZERO_ONE:
4347 c.i = opnd->imm.value;
4348 snprintf (buf, size, "%s", style_imm (styler, "#%.1f", c.f));
4352 case AARCH64_OPND_SVE_PATTERN:
4353 if (optional_operand_p (opcode, idx)
4354 && opnd->imm.value == get_optional_operand_default_value (opcode))
4356 enum_value = opnd->imm.value;
4357 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
4358 if (aarch64_sve_pattern_array[enum_value])
4359 snprintf (buf, size, "%s",
4360 style_reg (styler, aarch64_sve_pattern_array[enum_value]));
4362 snprintf (buf, size, "%s",
4363 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4366 case AARCH64_OPND_SVE_PATTERN_SCALED:
4367 if (optional_operand_p (opcode, idx)
4368 && !opnd->shifter.operator_present
4369 && opnd->imm.value == get_optional_operand_default_value (opcode))
4371 enum_value = opnd->imm.value;
4372 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
4373 if (aarch64_sve_pattern_array[opnd->imm.value])
4374 snprintf (buf, size, "%s",
4376 aarch64_sve_pattern_array[opnd->imm.value]));
4378 snprintf (buf, size, "%s",
4379 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4380 if (opnd->shifter.operator_present)
4382 size_t len = strlen (buf);
4383 const char *shift_name
4384 = aarch64_operand_modifiers[opnd->shifter.kind].name;
4385 snprintf (buf + len, size - len, ", %s %s",
4386 style_sub_mnem (styler, shift_name),
4387 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4391 case AARCH64_OPND_SVE_PRFOP:
4392 enum_value = opnd->imm.value;
4393 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
4394 if (aarch64_sve_prfop_array[enum_value])
4395 snprintf (buf, size, "%s",
4396 style_reg (styler, aarch64_sve_prfop_array[enum_value]));
4398 snprintf (buf, size, "%s",
4399 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4402 case AARCH64_OPND_IMM_MOV:
4403 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
4405 case 4: /* e.g. MOV Wd, #<imm32>. */
4407 int imm32 = opnd->imm.value;
4408 snprintf (buf, size, "%s",
4409 style_imm (styler, "#0x%-20x", imm32));
4410 snprintf (comment, comment_size, "#%d", imm32);
4413 case 8: /* e.g. MOV Xd, #<imm64>. */
4414 snprintf (buf, size, "%s", style_imm (styler, "#0x%-20" PRIx64,
4416 snprintf (comment, comment_size, "#%" PRIi64, opnd->imm.value);
4419 snprintf (buf, size, "<invalid>");
4424 case AARCH64_OPND_FPIMM0:
4425 snprintf (buf, size, "%s", style_imm (styler, "#0.0"));
4428 case AARCH64_OPND_LIMM:
4429 case AARCH64_OPND_AIMM:
4430 case AARCH64_OPND_HALF:
4431 case AARCH64_OPND_SVE_INV_LIMM:
4432 case AARCH64_OPND_SVE_LIMM:
4433 case AARCH64_OPND_SVE_LIMM_MOV:
4434 if (opnd->shifter.amount)
4435 snprintf (buf, size, "%s, %s %s",
4436 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
4437 style_sub_mnem (styler, "lsl"),
4438 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4440 snprintf (buf, size, "%s",
4441 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
4444 case AARCH64_OPND_SIMD_IMM:
4445 case AARCH64_OPND_SIMD_IMM_SFT:
4446 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
4447 || opnd->shifter.kind == AARCH64_MOD_NONE)
4448 snprintf (buf, size, "%s",
4449 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
4451 snprintf (buf, size, "%s, %s %s",
4452 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
4453 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
4454 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4457 case AARCH64_OPND_SVE_AIMM:
4458 case AARCH64_OPND_SVE_ASIMM:
4459 if (opnd->shifter.amount)
4460 snprintf (buf, size, "%s, %s %s",
4461 style_imm (styler, "#%" PRIi64, opnd->imm.value),
4462 style_sub_mnem (styler, "lsl"),
4463 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4465 snprintf (buf, size, "%s",
4466 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4469 case AARCH64_OPND_FPIMM:
4470 case AARCH64_OPND_SIMD_FPIMM:
4471 case AARCH64_OPND_SVE_FPIMM8:
4472 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
4474 case 2: /* e.g. FMOV <Hd>, #<imm>. */
4477 c.i = expand_fp_imm (2, opnd->imm.value);
4478 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
4481 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
4484 c.i = expand_fp_imm (4, opnd->imm.value);
4485 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
4488 case 8: /* e.g. FMOV <Sd>, #<imm>. */
4491 c.i = expand_fp_imm (8, opnd->imm.value);
4492 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.d));
4496 snprintf (buf, size, "<invalid>");
4501 case AARCH64_OPND_CCMP_IMM:
4502 case AARCH64_OPND_NZCV:
4503 case AARCH64_OPND_EXCEPTION:
4504 case AARCH64_OPND_UIMM4:
4505 case AARCH64_OPND_UIMM4_ADDG:
4506 case AARCH64_OPND_UIMM7:
4507 case AARCH64_OPND_UIMM10:
4508 if (optional_operand_p (opcode, idx)
4509 && (opnd->imm.value ==
4510 (int64_t) get_optional_operand_default_value (opcode)))
4511 /* Omit the operand, e.g. DCPS1. */
4513 snprintf (buf, size, "%s",
4514 style_imm (styler, "#0x%x", (unsigned int) opnd->imm.value));
4517 case AARCH64_OPND_COND:
4518 case AARCH64_OPND_COND1:
4519 snprintf (buf, size, "%s",
4520 style_sub_mnem (styler, opnd->cond->names[0]));
4521 num_conds = ARRAY_SIZE (opnd->cond->names);
4522 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
4524 size_t len = comment != NULL ? strlen (comment) : 0;
4526 snprintf (comment + len, comment_size - len, "%s = %s",
4527 opnd->cond->names[0], opnd->cond->names[i]);
4529 snprintf (comment + len, comment_size - len, ", %s",
4530 opnd->cond->names[i]);
4534 case AARCH64_OPND_ADDR_ADRP:
4535 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
4541 /* This is not necessary during the disassembling, as print_address_func
4542 in the disassemble_info will take care of the printing. But some
4543 other callers may be still interested in getting the string in *STR,
4544 so here we do snprintf regardless. */
4545 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64 , addr));
4548 case AARCH64_OPND_ADDR_PCREL14:
4549 case AARCH64_OPND_ADDR_PCREL19:
4550 case AARCH64_OPND_ADDR_PCREL21:
4551 case AARCH64_OPND_ADDR_PCREL26:
4552 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
4557 /* This is not necessary during the disassembling, as print_address_func
4558 in the disassemble_info will take care of the printing. But some
4559 other callers may be still interested in getting the string in *STR,
4560 so here we do snprintf regardless. */
4561 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64, addr));
4564 case AARCH64_OPND_ADDR_SIMPLE:
4565 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
4566 case AARCH64_OPND_SIMD_ADDR_POST:
4567 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
4568 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
4570 if (opnd->addr.offset.is_reg)
4571 snprintf (buf, size, "[%s], %s",
4572 style_reg (styler, name),
4573 style_reg (styler, "x%d", opnd->addr.offset.regno));
4575 snprintf (buf, size, "[%s], %s",
4576 style_reg (styler, name),
4577 style_imm (styler, "#%d", opnd->addr.offset.imm));
4580 snprintf (buf, size, "[%s]", style_reg (styler, name));
4583 case AARCH64_OPND_ADDR_REGOFF:
4584 case AARCH64_OPND_SVE_ADDR_R:
4585 case AARCH64_OPND_SVE_ADDR_RR:
4586 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
4587 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
4588 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
4589 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
4590 case AARCH64_OPND_SVE_ADDR_RX:
4591 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
4592 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
4593 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
4594 print_register_offset_address
4595 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4596 get_offset_int_reg_name (opnd), styler);
4599 case AARCH64_OPND_SVE_ADDR_ZX:
4600 print_register_offset_address
4602 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4603 get_64bit_int_reg_name (opnd->addr.offset.regno, 0), styler);
4606 case AARCH64_OPND_SVE_ADDR_RZ:
4607 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
4608 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
4609 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
4610 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
4611 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
4612 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
4613 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
4614 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
4615 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
4616 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
4617 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
4618 print_register_offset_address
4619 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4620 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
4624 case AARCH64_OPND_ADDR_SIMM7:
4625 case AARCH64_OPND_ADDR_SIMM9:
4626 case AARCH64_OPND_ADDR_SIMM9_2:
4627 case AARCH64_OPND_ADDR_SIMM10:
4628 case AARCH64_OPND_ADDR_SIMM11:
4629 case AARCH64_OPND_ADDR_SIMM13:
4630 case AARCH64_OPND_RCPC3_ADDR_OFFSET:
4631 case AARCH64_OPND_ADDR_OFFSET:
4632 case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND:
4633 case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB:
4634 case AARCH64_OPND_RCPC3_ADDR_POSTIND:
4635 case AARCH64_OPND_RCPC3_ADDR_PREIND_WB:
4636 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
4637 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
4638 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
4639 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
4640 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
4641 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
4642 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
4643 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
4644 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
4645 case AARCH64_OPND_SVE_ADDR_RI_U6:
4646 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
4647 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
4648 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
4649 print_immediate_offset_address
4650 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4654 case AARCH64_OPND_SVE_ADDR_ZI_U5:
4655 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
4656 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
4657 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
4658 print_immediate_offset_address
4660 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4664 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
4665 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
4666 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
4667 print_register_offset_address
4669 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4670 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
4674 case AARCH64_OPND_ADDR_UIMM12:
4675 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
4676 if (opnd->addr.offset.imm)
4677 snprintf (buf, size, "[%s, %s]",
4678 style_reg (styler, name),
4679 style_imm (styler, "#%d", opnd->addr.offset.imm));
4681 snprintf (buf, size, "[%s]", style_reg (styler, name));
4684 case AARCH64_OPND_SYSREG:
4685 case AARCH64_OPND_SYSREG128:
4686 for (i = 0; aarch64_sys_regs[i].name; ++i)
4688 const aarch64_sys_reg *sr = aarch64_sys_regs + i;
4691 = (!(sr->flags & (F_REG_READ | F_REG_WRITE))
4692 || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
4693 && AARCH64_CPU_HAS_ALL_FEATURES (features, sr->features);
4695 /* Try to find an exact match, but if that fails, return the first
4696 partial match that was found. */
4697 if (aarch64_sys_regs[i].value == opnd->sysreg.value
4698 && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
4699 && ! aarch64_sys_reg_alias_p (aarch64_sys_regs[i].flags)
4700 && (name == NULL || exact_match))
4702 name = aarch64_sys_regs[i].name;
4710 /* If we didn't match exactly, that means the presence of a flag
4711 indicates what we didn't want for this instruction. e.g. If
4712 F_REG_READ is there, that means we were looking for a write
4713 register. See aarch64_ext_sysreg. */
4714 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
4715 *notes = _("reading from a write-only register");
4716 else if (aarch64_sys_regs[i].flags & F_REG_READ)
4717 *notes = _("writing to a read-only register");
4722 snprintf (buf, size, "%s", style_reg (styler, name));
4725 /* Implementation defined system register. */
4726 unsigned int value = opnd->sysreg.value;
4727 snprintf (buf, size, "%s",
4728 style_reg (styler, "s%u_%u_c%u_c%u_%u",
4729 (value >> 14) & 0x3, (value >> 11) & 0x7,
4730 (value >> 7) & 0xf, (value >> 3) & 0xf,
4735 case AARCH64_OPND_PSTATEFIELD:
4736 for (i = 0; aarch64_pstatefields[i].name; ++i)
4737 if (aarch64_pstatefields[i].value == opnd->pstatefield)
4739 /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM,
4740 SVCRZA and SVCRSMZA. */
4741 uint32_t flags = aarch64_pstatefields[i].flags;
4742 if (flags & F_REG_IN_CRM
4743 && (PSTATE_DECODE_CRM (opnd->sysreg.flags)
4744 != PSTATE_DECODE_CRM (flags)))
4748 assert (aarch64_pstatefields[i].name);
4749 snprintf (buf, size, "%s",
4750 style_reg (styler, aarch64_pstatefields[i].name));
4753 case AARCH64_OPND_SYSREG_AT:
4754 case AARCH64_OPND_SYSREG_DC:
4755 case AARCH64_OPND_SYSREG_IC:
4756 case AARCH64_OPND_SYSREG_TLBI:
4757 case AARCH64_OPND_SYSREG_TLBIP:
4758 case AARCH64_OPND_SYSREG_SR:
4759 snprintf (buf, size, "%s", style_reg (styler, opnd->sysins_op->name));
4762 case AARCH64_OPND_BARRIER:
4763 case AARCH64_OPND_BARRIER_DSB_NXS:
4765 if (opnd->barrier->name[0] == '#')
4766 snprintf (buf, size, "%s", style_imm (styler, opnd->barrier->name));
4768 snprintf (buf, size, "%s",
4769 style_sub_mnem (styler, opnd->barrier->name));
4773 case AARCH64_OPND_BARRIER_ISB:
4774 /* Operand can be omitted, e.g. in DCPS1. */
4775 if (! optional_operand_p (opcode, idx)
4776 || (opnd->barrier->value
4777 != get_optional_operand_default_value (opcode)))
4778 snprintf (buf, size, "%s",
4779 style_imm (styler, "#0x%x", opnd->barrier->value));
4782 case AARCH64_OPND_PRFOP:
4783 if (opnd->prfop->name != NULL)
4784 snprintf (buf, size, "%s", style_sub_mnem (styler, opnd->prfop->name));
4786 snprintf (buf, size, "%s", style_imm (styler, "#0x%02x",
4787 opnd->prfop->value));
4790 case AARCH64_OPND_RPRFMOP:
4791 enum_value = opnd->imm.value;
4792 if (enum_value < ARRAY_SIZE (aarch64_rprfmop_array)
4793 && aarch64_rprfmop_array[enum_value])
4794 snprintf (buf, size, "%s",
4795 style_reg (styler, aarch64_rprfmop_array[enum_value]));
4797 snprintf (buf, size, "%s",
4798 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4801 case AARCH64_OPND_BARRIER_PSB:
4802 snprintf (buf, size, "%s", style_sub_mnem (styler, "csync"));
4805 case AARCH64_OPND_X16:
4806 snprintf (buf, size, "%s", style_reg (styler, "x16"));
4809 case AARCH64_OPND_SME_ZT0:
4810 snprintf (buf, size, "%s", style_reg (styler, "zt0"));
4813 case AARCH64_OPND_SME_ZT0_INDEX:
4814 snprintf (buf, size, "%s[%s]", style_reg (styler, "zt0"),
4815 style_imm (styler, "%d", (int) opnd->imm.value));
4818 case AARCH64_OPND_SME_ZT0_LIST:
4819 snprintf (buf, size, "{%s}", style_reg (styler, "zt0"));
4822 case AARCH64_OPND_BARRIER_GCSB:
4823 snprintf (buf, size, "%s", style_sub_mnem (styler, "dsync"));
4826 case AARCH64_OPND_BTI_TARGET:
4827 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
4828 snprintf (buf, size, "%s",
4829 style_sub_mnem (styler, opnd->hint_option->name));
4832 case AARCH64_OPND_MOPS_ADDR_Rd:
4833 case AARCH64_OPND_MOPS_ADDR_Rs:
4834 snprintf (buf, size, "[%s]!",
4836 get_int_reg_name (opnd->reg.regno,
4837 AARCH64_OPND_QLF_X, 0)));
4840 case AARCH64_OPND_MOPS_WB_Rn:
4841 snprintf (buf, size, "%s!",
4842 style_reg (styler, get_int_reg_name (opnd->reg.regno,
4843 AARCH64_OPND_QLF_X, 0)));
4847 snprintf (buf, size, "<invalid>");
4852 #define CPENC(op0,op1,crn,crm,op2) \
4853 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
4854 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
4855 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
4856 /* for 3.9.10 System Instructions */
4857 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
4876 /* TODO: there is one more issue that needs to be resolved:
4877 1. handle cpu-implementation-defined system registers.
4879 Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
4880 respectively. If neither of these are set then the register is read-write. */
4881 const aarch64_sys_reg aarch64_sys_regs [] =
4883 #define SYSREG(name, encoding, flags, features) \
4884 { name, encoding, flags, features },
4885 #include "aarch64-sys-regs.def"
4886 { 0, CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES }
4891 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
4893 return (reg_flags & F_DEPRECATED) != 0;
4897 aarch64_sys_reg_128bit_p (const uint32_t reg_flags)
4899 return (reg_flags & F_REG_128) != 0;
4903 aarch64_sys_reg_alias_p (const uint32_t reg_flags)
4905 return (reg_flags & F_REG_ALIAS) != 0;
4908 /* The CPENC below is fairly misleading, the fields
4909 here are not in CPENC form. They are in op2op1 form. The fields are encoded
4910 by ins_pstatefield, which just shifts the value by the width of the fields
4911 in a loop. So if you CPENC them only the first value will be set, the rest
4912 are masked out to 0. As an example, op2 = 3, op1 = 2. CPENC would produce a
4913 value of 0b110000000001000000 (0x30040) while what you want is
4915 const aarch64_sys_reg aarch64_pstatefields [] =
4917 { "spsel", 0x05, F_REG_MAX_VALUE (1), AARCH64_NO_FEATURES },
4918 { "daifset", 0x1e, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES },
4919 { "daifclr", 0x1f, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES },
4920 { "pan", 0x04, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (PAN) },
4921 { "uao", 0x03, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_2A) },
4922 { "ssbs", 0x19, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (SSBS) },
4923 { "dit", 0x1a, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_4A) },
4924 { "tco", 0x1c, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
4925 { "svcrsm", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x2,0x1) | F_REG_MAX_VALUE (1)
4926 | F_ARCHEXT, AARCH64_FEATURE (SME) },
4927 { "svcrza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x4,0x1) | F_REG_MAX_VALUE (1)
4928 | F_ARCHEXT, AARCH64_FEATURE (SME) },
4929 { "svcrsmza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x6,0x1) | F_REG_MAX_VALUE (1)
4930 | F_ARCHEXT, AARCH64_FEATURE (SME) },
4931 { "allint", 0x08, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_8A) },
4932 { 0, CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES },
4936 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4937 const aarch64_sys_reg *reg)
4939 if (!(reg->flags & F_ARCHEXT))
4942 return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
4945 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
4947 { "ialluis", CPENS(0,C7,C1,0), 0, AARCH64_NO_FEATURES },
4948 { "iallu", CPENS(0,C7,C5,0), 0, AARCH64_NO_FEATURES },
4949 { "ivau", CPENS (3, C7, C5, 1), F_HASXT, AARCH64_NO_FEATURES },
4950 { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
4953 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
4955 { "zva", CPENS (3, C7, C4, 1), F_HASXT, AARCH64_NO_FEATURES },
4956 { "gva", CPENS (3, C7, C4, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
4957 { "gzva", CPENS (3, C7, C4, 4), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
4958 { "ivac", CPENS (0, C7, C6, 1), F_HASXT, AARCH64_NO_FEATURES },
4959 { "igvac", CPENS (0, C7, C6, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
4960 { "igsw", CPENS (0, C7, C6, 4), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
4961 { "isw", CPENS (0, C7, C6, 2), F_HASXT, AARCH64_NO_FEATURES },
4962 { "igdvac", CPENS (0, C7, C6, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
4963 { "igdsw", CPENS (0, C7, C6, 6), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
4964 { "cvac", CPENS (3, C7, C10, 1), F_HASXT, AARCH64_NO_FEATURES },
4965 { "cgvac", CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
4966 { "cgdvac", CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
4967 { "csw", CPENS (0, C7, C10, 2), F_HASXT, AARCH64_NO_FEATURES },
4968 { "cgsw", CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
4969 { "cgdsw", CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
4970 { "cvau", CPENS (3, C7, C11, 1), F_HASXT, AARCH64_NO_FEATURES },
4971 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (V8_2A) },
4972 { "cgvap", CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
4973 { "cgdvap", CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
4974 { "cvadp", CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (CVADP) },
4975 { "cgvadp", CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
4976 { "cgdvadp", CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
4977 { "civac", CPENS (3, C7, C14, 1), F_HASXT, AARCH64_NO_FEATURES },
4978 { "cigvac", CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
4979 { "cigdvac", CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
4980 { "cisw", CPENS (0, C7, C14, 2), F_HASXT, AARCH64_NO_FEATURES },
4981 { "cigsw", CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
4982 { "cigdsw", CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
4983 { "cipapa", CPENS (6, C7, C14, 1), F_HASXT, AARCH64_NO_FEATURES },
4984 { "cigdpapa", CPENS (6, C7, C14, 5), F_HASXT, AARCH64_NO_FEATURES },
4985 { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
4988 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
4990 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT, AARCH64_NO_FEATURES },
4991 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT, AARCH64_NO_FEATURES },
4992 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT, AARCH64_NO_FEATURES },
4993 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT, AARCH64_NO_FEATURES },
4994 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT, AARCH64_NO_FEATURES },
4995 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT, AARCH64_NO_FEATURES },
4996 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT, AARCH64_NO_FEATURES },
4997 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT, AARCH64_NO_FEATURES },
4998 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT, AARCH64_NO_FEATURES },
4999 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT, AARCH64_NO_FEATURES },
5000 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT, AARCH64_NO_FEATURES },
5001 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT, AARCH64_NO_FEATURES },
5002 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (V8_2A) },
5003 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (V8_2A) },
5004 { "s1e1a", CPENS (0, C7, C9, 2), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (ATS1A) },
5005 { "s1e2a", CPENS (4, C7, C9, 2), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (ATS1A) },
5006 { "s1e3a", CPENS (6, C7, C9, 2), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (ATS1A) },
5007 { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
5010 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
5012 { "rpaos", CPENS (6, C8, C4, 3), F_HASXT, AARCH64_NO_FEATURES },
5013 { "rpalos", CPENS (6, C8, C4, 7), F_HASXT, AARCH64_NO_FEATURES },
5014 { "paallos", CPENS (6, C8, C1, 4), 0, AARCH64_NO_FEATURES },
5015 { "paall", CPENS (6, C8, C7, 4), 0, AARCH64_NO_FEATURES },
5017 #define TLBI_XS_OP(OP, CODE, FLAGS) \
5018 { OP, CODE, FLAGS, AARCH64_NO_FEATURES }, \
5019 { OP "nxs", CODE | CPENS (0, C9, 0, 0), FLAGS | F_ARCHEXT, AARCH64_FEATURE (XS) },
5021 TLBI_XS_OP ( "vmalle1", CPENS (0, C8, C7, 0), 0)
5022 TLBI_XS_OP ( "vae1", CPENS (0, C8, C7, 1), F_HASXT | F_REG_128)
5023 TLBI_XS_OP ( "aside1", CPENS (0, C8, C7, 2), F_HASXT )
5024 TLBI_XS_OP ( "vaae1", CPENS (0, C8, C7, 3), F_HASXT | F_REG_128)
5025 TLBI_XS_OP ( "vmalle1is", CPENS (0, C8, C3, 0), 0)
5026 TLBI_XS_OP ( "vae1is", CPENS (0, C8, C3, 1), F_HASXT | F_REG_128)
5027 TLBI_XS_OP ( "aside1is", CPENS (0, C8, C3, 2), F_HASXT )
5028 TLBI_XS_OP ( "vaae1is", CPENS (0, C8, C3, 3), F_HASXT | F_REG_128)
5029 TLBI_XS_OP ( "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT | F_REG_128)
5030 TLBI_XS_OP ( "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT | F_REG_128)
5031 TLBI_XS_OP ( "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT | F_REG_128)
5032 TLBI_XS_OP ( "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT | F_REG_128)
5033 TLBI_XS_OP ( "vae2", CPENS (4, C8, C7, 1), F_HASXT | F_REG_128)
5034 TLBI_XS_OP ( "vae2is", CPENS (4, C8, C3, 1), F_HASXT | F_REG_128)
5035 TLBI_XS_OP ( "vmalls12e1",CPENS (4, C8, C7, 6), 0)
5036 TLBI_XS_OP ( "vmalls12e1is",CPENS(4,C8, C3, 6), 0)
5037 TLBI_XS_OP ( "vae3", CPENS (6, C8, C7, 1), F_HASXT | F_REG_128)
5038 TLBI_XS_OP ( "vae3is", CPENS (6, C8, C3, 1), F_HASXT | F_REG_128)
5039 TLBI_XS_OP ( "alle2", CPENS (4, C8, C7, 0), 0)
5040 TLBI_XS_OP ( "alle2is", CPENS (4, C8, C3, 0), 0)
5041 TLBI_XS_OP ( "alle1", CPENS (4, C8, C7, 4), 0)
5042 TLBI_XS_OP ( "alle1is", CPENS (4, C8, C3, 4), 0)
5043 TLBI_XS_OP ( "alle3", CPENS (6, C8, C7, 0), 0)
5044 TLBI_XS_OP ( "alle3is", CPENS (6, C8, C3, 0), 0)
5045 TLBI_XS_OP ( "vale1is", CPENS (0, C8, C3, 5), F_HASXT | F_REG_128)
5046 TLBI_XS_OP ( "vale2is", CPENS (4, C8, C3, 5), F_HASXT | F_REG_128)
5047 TLBI_XS_OP ( "vale3is", CPENS (6, C8, C3, 5), F_HASXT | F_REG_128)
5048 TLBI_XS_OP ( "vaale1is", CPENS (0, C8, C3, 7), F_HASXT | F_REG_128)
5049 TLBI_XS_OP ( "vale1", CPENS (0, C8, C7, 5), F_HASXT | F_REG_128)
5050 TLBI_XS_OP ( "vale2", CPENS (4, C8, C7, 5), F_HASXT | F_REG_128)
5051 TLBI_XS_OP ( "vale3", CPENS (6, C8, C7, 5), F_HASXT | F_REG_128)
5052 TLBI_XS_OP ( "vaale1", CPENS (0, C8, C7, 7), F_HASXT | F_REG_128)
5055 #define TLBI_XS_OP(OP, CODE, FLAGS) \
5056 { OP, CODE, FLAGS | F_ARCHEXT, AARCH64_FEATURE (V8_4A) }, \
5057 { OP "nxs", CODE | CPENS (0, C9, 0, 0), FLAGS | F_ARCHEXT, AARCH64_FEATURE (XS) },
5059 TLBI_XS_OP ( "vmalle1os", CPENS (0, C8, C1, 0), 0 )
5060 TLBI_XS_OP ( "vae1os", CPENS (0, C8, C1, 1), F_HASXT | F_REG_128 )
5061 TLBI_XS_OP ( "aside1os", CPENS (0, C8, C1, 2), F_HASXT )
5062 TLBI_XS_OP ( "vaae1os", CPENS (0, C8, C1, 3), F_HASXT | F_REG_128 )
5063 TLBI_XS_OP ( "vale1os", CPENS (0, C8, C1, 5), F_HASXT | F_REG_128 )
5064 TLBI_XS_OP ( "vaale1os", CPENS (0, C8, C1, 7), F_HASXT | F_REG_128 )
5065 TLBI_XS_OP ( "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_REG_128 )
5066 TLBI_XS_OP ( "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_REG_128 )
5067 TLBI_XS_OP ( "vae2os", CPENS (4, C8, C1, 1), F_HASXT | F_REG_128 )
5068 TLBI_XS_OP ( "vale2os", CPENS (4, C8, C1, 5), F_HASXT | F_REG_128 )
5069 TLBI_XS_OP ( "vmalls12e1os", CPENS (4, C8, C1, 6), 0 )
5070 TLBI_XS_OP ( "vae3os", CPENS (6, C8, C1, 1), F_HASXT | F_REG_128 )
5071 TLBI_XS_OP ( "vale3os", CPENS (6, C8, C1, 5), F_HASXT | F_REG_128 )
5072 TLBI_XS_OP ( "alle2os", CPENS (4, C8, C1, 0), 0 )
5073 TLBI_XS_OP ( "alle1os", CPENS (4, C8, C1, 4), 0 )
5074 TLBI_XS_OP ( "alle3os", CPENS (6, C8, C1, 0), 0 )
5076 TLBI_XS_OP ( "rvae1", CPENS (0, C8, C6, 1), F_HASXT | F_REG_128 )
5077 TLBI_XS_OP ( "rvaae1", CPENS (0, C8, C6, 3), F_HASXT | F_REG_128 )
5078 TLBI_XS_OP ( "rvale1", CPENS (0, C8, C6, 5), F_HASXT | F_REG_128 )
5079 TLBI_XS_OP ( "rvaale1", CPENS (0, C8, C6, 7), F_HASXT | F_REG_128 )
5080 TLBI_XS_OP ( "rvae1is", CPENS (0, C8, C2, 1), F_HASXT | F_REG_128 )
5081 TLBI_XS_OP ( "rvaae1is", CPENS (0, C8, C2, 3), F_HASXT | F_REG_128 )
5082 TLBI_XS_OP ( "rvale1is", CPENS (0, C8, C2, 5), F_HASXT | F_REG_128 )
5083 TLBI_XS_OP ( "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_REG_128 )
5084 TLBI_XS_OP ( "rvae1os", CPENS (0, C8, C5, 1), F_HASXT | F_REG_128 )
5085 TLBI_XS_OP ( "rvaae1os", CPENS (0, C8, C5, 3), F_HASXT | F_REG_128 )
5086 TLBI_XS_OP ( "rvale1os", CPENS (0, C8, C5, 5), F_HASXT | F_REG_128 )
5087 TLBI_XS_OP ( "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_REG_128 )
5088 TLBI_XS_OP ( "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_REG_128 )
5089 TLBI_XS_OP ( "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_REG_128 )
5090 TLBI_XS_OP ( "ripas2e1", CPENS (4, C8, C4, 2), F_HASXT | F_REG_128 )
5091 TLBI_XS_OP ( "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_REG_128 )
5092 TLBI_XS_OP ( "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_REG_128 )
5093 TLBI_XS_OP ( "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_REG_128 )
5094 TLBI_XS_OP ( "rvae2", CPENS (4, C8, C6, 1), F_HASXT | F_REG_128 )
5095 TLBI_XS_OP ( "rvale2", CPENS (4, C8, C6, 5), F_HASXT | F_REG_128 )
5096 TLBI_XS_OP ( "rvae2is", CPENS (4, C8, C2, 1), F_HASXT | F_REG_128 )
5097 TLBI_XS_OP ( "rvale2is", CPENS (4, C8, C2, 5), F_HASXT | F_REG_128 )
5098 TLBI_XS_OP ( "rvae2os", CPENS (4, C8, C5, 1), F_HASXT | F_REG_128 )
5099 TLBI_XS_OP ( "rvale2os", CPENS (4, C8, C5, 5), F_HASXT | F_REG_128 )
5100 TLBI_XS_OP ( "rvae3", CPENS (6, C8, C6, 1), F_HASXT | F_REG_128 )
5101 TLBI_XS_OP ( "rvale3", CPENS (6, C8, C6, 5), F_HASXT | F_REG_128 )
5102 TLBI_XS_OP ( "rvae3is", CPENS (6, C8, C2, 1), F_HASXT | F_REG_128 )
5103 TLBI_XS_OP ( "rvale3is", CPENS (6, C8, C2, 5), F_HASXT | F_REG_128 )
5104 TLBI_XS_OP ( "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_REG_128 )
5105 TLBI_XS_OP ( "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_REG_128 )
5109 { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
5112 const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
5114 /* RCTX is somewhat unique in a way that it has different values
5115 (op2) based on the instruction in which it is used (cfp/dvp/cpp).
5116 Thus op2 is masked out and instead encoded directly in the
5117 aarch64_opcode_table entries for the respective instructions. */
5118 { "rctx", CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE, AARCH64_FEATURE (PREDRES) }, /* WO */
5119 { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES }
5123 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
5125 return (sys_ins_reg->flags & F_HASXT) != 0;
5129 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
5130 const char *reg_name,
5132 const aarch64_feature_set *reg_features)
5134 /* Armv8-R has no EL3. */
5135 if (AARCH64_CPU_HAS_FEATURE (features, V8R))
5137 const char *suffix = strrchr (reg_name, '_');
5138 if (suffix && !strcmp (suffix, "_el3"))
5142 if (!(reg_flags & F_ARCHEXT))
5145 return AARCH64_CPU_HAS_ALL_FEATURES (features, *reg_features);
5165 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
5166 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
5168 static enum err_type
5169 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
5170 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
5171 bool encoding ATTRIBUTE_UNUSED,
5172 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5173 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5175 int t = BITS (insn, 4, 0);
5176 int n = BITS (insn, 9, 5);
5177 int t2 = BITS (insn, 14, 10);
5181 /* Write back enabled. */
5182 if ((t == n || t2 == n) && n != 31)
5196 /* Verifier for vector by element 3 operands functions where the
5197 conditions `if sz:L == 11 then UNDEFINED` holds. */
5199 static enum err_type
5200 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
5201 bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
5202 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5203 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5205 const aarch64_insn undef_pattern = 0x3;
5208 assert (inst->opcode);
5209 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
5210 value = encoding ? inst->value : insn;
5213 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
5219 /* Check an instruction that takes three register operands and that
5220 requires the register numbers to be distinct from one another. */
5222 static enum err_type
5223 verify_three_different_regs (const struct aarch64_inst *inst,
5224 const aarch64_insn insn ATTRIBUTE_UNUSED,
5225 bfd_vma pc ATTRIBUTE_UNUSED,
5226 bool encoding ATTRIBUTE_UNUSED,
5227 aarch64_operand_error *mismatch_detail
5229 aarch64_instr_sequence *insn_sequence
5234 rd = inst->operands[0].reg.regno;
5235 rs = inst->operands[1].reg.regno;
5236 rn = inst->operands[2].reg.regno;
5237 if (rd == rs || rd == rn || rs == rn)
5239 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5240 mismatch_detail->error
5241 = _("the three register operands must be distinct from one another");
5242 mismatch_detail->index = -1;
5249 /* Add INST to the end of INSN_SEQUENCE. */
5252 add_insn_to_sequence (const struct aarch64_inst *inst,
5253 aarch64_instr_sequence *insn_sequence)
5255 insn_sequence->instr[insn_sequence->num_added_insns++] = *inst;
5258 /* Initialize an instruction sequence insn_sequence with the instruction INST.
5259 If INST is NULL the given insn_sequence is cleared and the sequence is left
5263 init_insn_sequence (const struct aarch64_inst *inst,
5264 aarch64_instr_sequence *insn_sequence)
5266 int num_req_entries = 0;
5268 if (insn_sequence->instr)
5270 XDELETE (insn_sequence->instr);
5271 insn_sequence->instr = NULL;
5274 /* Handle all the cases here. May need to think of something smarter than
5275 a giant if/else chain if this grows. At that time, a lookup table may be
5277 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
5278 num_req_entries = 1;
5279 if (inst && (inst->opcode->constraints & C_SCAN_MOPS_PME) == C_SCAN_MOPS_P)
5280 num_req_entries = 2;
5282 insn_sequence->num_added_insns = 0;
5283 insn_sequence->num_allocated_insns = num_req_entries;
5285 if (num_req_entries != 0)
5287 insn_sequence->instr = XCNEWVEC (aarch64_inst, num_req_entries);
5288 add_insn_to_sequence (inst, insn_sequence);
5292 /* Subroutine of verify_constraints. Check whether the instruction
5293 is part of a MOPS P/M/E sequence and, if so, whether sequencing
5294 expectations are met. Return true if the check passes, otherwise
5295 describe the problem in MISMATCH_DETAIL.
5297 IS_NEW_SECTION is true if INST is assumed to start a new section.
5298 The other arguments are as for verify_constraints. */
5301 verify_mops_pme_sequence (const struct aarch64_inst *inst,
5302 bool is_new_section,
5303 aarch64_operand_error *mismatch_detail,
5304 aarch64_instr_sequence *insn_sequence)
5306 const struct aarch64_opcode *opcode;
5307 const struct aarch64_inst *prev_insn;
5310 opcode = inst->opcode;
5311 if (insn_sequence->instr)
5312 prev_insn = insn_sequence->instr + (insn_sequence->num_added_insns - 1);
5317 && (prev_insn->opcode->constraints & C_SCAN_MOPS_PME)
5318 && prev_insn->opcode != opcode - 1)
5320 mismatch_detail->kind = AARCH64_OPDE_EXPECTED_A_AFTER_B;
5321 mismatch_detail->error = NULL;
5322 mismatch_detail->index = -1;
5323 mismatch_detail->data[0].s = prev_insn->opcode[1].name;
5324 mismatch_detail->data[1].s = prev_insn->opcode->name;
5325 mismatch_detail->non_fatal = true;
5329 if (opcode->constraints & C_SCAN_MOPS_PME)
5331 if (is_new_section || !prev_insn || prev_insn->opcode != opcode - 1)
5333 mismatch_detail->kind = AARCH64_OPDE_A_SHOULD_FOLLOW_B;
5334 mismatch_detail->error = NULL;
5335 mismatch_detail->index = -1;
5336 mismatch_detail->data[0].s = opcode->name;
5337 mismatch_detail->data[1].s = opcode[-1].name;
5338 mismatch_detail->non_fatal = true;
5342 for (i = 0; i < 3; ++i)
5343 /* There's no specific requirement for the data register to be
5344 the same between consecutive SET* instructions. */
5345 if ((opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd
5346 || opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs
5347 || opcode->operands[i] == AARCH64_OPND_MOPS_WB_Rn)
5348 && prev_insn->operands[i].reg.regno != inst->operands[i].reg.regno)
5350 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5351 if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd)
5352 mismatch_detail->error = _("destination register differs from "
5353 "preceding instruction");
5354 else if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs)
5355 mismatch_detail->error = _("source register differs from "
5356 "preceding instruction");
5358 mismatch_detail->error = _("size register differs from "
5359 "preceding instruction");
5360 mismatch_detail->index = i;
5361 mismatch_detail->non_fatal = true;
5369 /* This function verifies that the instruction INST adheres to its specified
5370 constraints. If it does then ERR_OK is returned, if not then ERR_VFI is
5371 returned and MISMATCH_DETAIL contains the reason why verification failed.
5373 The function is called both during assembly and disassembly. If assembling
5374 then ENCODING will be TRUE, else FALSE. If dissassembling PC will be set
5375 and will contain the PC of the current instruction w.r.t to the section.
5377 If ENCODING and PC=0 then you are at a start of a section. The constraints
5378 are verified against the given state insn_sequence which is updated as it
5379 transitions through the verification. */
5382 verify_constraints (const struct aarch64_inst *inst,
5383 const aarch64_insn insn ATTRIBUTE_UNUSED,
5386 aarch64_operand_error *mismatch_detail,
5387 aarch64_instr_sequence *insn_sequence)
5390 assert (inst->opcode);
5392 const struct aarch64_opcode *opcode = inst->opcode;
5393 if (!opcode->constraints && !insn_sequence->instr)
5396 assert (insn_sequence);
5398 enum err_type res = ERR_OK;
5400 /* This instruction puts a constraint on the insn_sequence. */
5401 if (opcode->flags & F_SCAN)
5403 if (insn_sequence->instr)
5405 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5406 mismatch_detail->error = _("instruction opens new dependency "
5407 "sequence without ending previous one");
5408 mismatch_detail->index = -1;
5409 mismatch_detail->non_fatal = true;
5413 init_insn_sequence (inst, insn_sequence);
5417 bool is_new_section = (!encoding && pc == 0);
5418 if (!verify_mops_pme_sequence (inst, is_new_section, mismatch_detail,
5422 if ((opcode->constraints & C_SCAN_MOPS_PME) != C_SCAN_MOPS_M)
5423 init_insn_sequence (NULL, insn_sequence);
5426 /* Verify constraints on an existing sequence. */
5427 if (insn_sequence->instr)
5429 const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
5430 /* If we're decoding and we hit PC=0 with an open sequence then we haven't
5431 closed a previous one that we should have. */
5432 if (is_new_section && res == ERR_OK)
5434 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5435 mismatch_detail->error = _("previous `movprfx' sequence not closed");
5436 mismatch_detail->index = -1;
5437 mismatch_detail->non_fatal = true;
5439 /* Reset the sequence. */
5440 init_insn_sequence (NULL, insn_sequence);
5444 /* Validate C_SCAN_MOVPRFX constraints. Move this to a lookup table. */
5445 if (inst_opcode->constraints & C_SCAN_MOVPRFX)
5447 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5448 instruction for better error messages. */
5449 if (!opcode->avariant
5450 || (!AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE)
5451 && !AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE2)))
5453 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5454 mismatch_detail->error = _("SVE instruction expected after "
5456 mismatch_detail->index = -1;
5457 mismatch_detail->non_fatal = true;
5462 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5463 instruction that is allowed to be used with a MOVPRFX. */
5464 if (!(opcode->constraints & C_SCAN_MOVPRFX))
5466 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5467 mismatch_detail->error = _("SVE `movprfx' compatible instruction "
5469 mismatch_detail->index = -1;
5470 mismatch_detail->non_fatal = true;
5475 /* Next check for usage of the predicate register. */
5476 aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
5477 aarch64_opnd_info blk_pred, inst_pred;
5478 memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
5479 memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
5480 bool predicated = false;
5481 assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
5483 /* Determine if the movprfx instruction used is predicated or not. */
5484 if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
5487 blk_pred = insn_sequence->instr->operands[1];
5490 unsigned char max_elem_size = 0;
5491 unsigned char current_elem_size;
5492 int num_op_used = 0, last_op_usage = 0;
5493 int i, inst_pred_idx = -1;
5494 int num_ops = aarch64_num_of_operands (opcode);
5495 for (i = 0; i < num_ops; i++)
5497 aarch64_opnd_info inst_op = inst->operands[i];
5498 switch (inst_op.type)
5500 case AARCH64_OPND_SVE_Zd:
5501 case AARCH64_OPND_SVE_Zm_5:
5502 case AARCH64_OPND_SVE_Zm_16:
5503 case AARCH64_OPND_SVE_Zn:
5504 case AARCH64_OPND_SVE_Zt:
5505 case AARCH64_OPND_SVE_Vm:
5506 case AARCH64_OPND_SVE_Vn:
5507 case AARCH64_OPND_Va:
5508 case AARCH64_OPND_Vn:
5509 case AARCH64_OPND_Vm:
5510 case AARCH64_OPND_Sn:
5511 case AARCH64_OPND_Sm:
5512 if (inst_op.reg.regno == blk_dest.reg.regno)
5518 = aarch64_get_qualifier_esize (inst_op.qualifier);
5519 if (current_elem_size > max_elem_size)
5520 max_elem_size = current_elem_size;
5522 case AARCH64_OPND_SVE_Pd:
5523 case AARCH64_OPND_SVE_Pg3:
5524 case AARCH64_OPND_SVE_Pg4_5:
5525 case AARCH64_OPND_SVE_Pg4_10:
5526 case AARCH64_OPND_SVE_Pg4_16:
5527 case AARCH64_OPND_SVE_Pm:
5528 case AARCH64_OPND_SVE_Pn:
5529 case AARCH64_OPND_SVE_Pt:
5530 case AARCH64_OPND_SME_Pm:
5531 inst_pred = inst_op;
5539 assert (max_elem_size != 0);
5540 aarch64_opnd_info inst_dest = inst->operands[0];
5541 /* Determine the size that should be used to compare against the
5544 = opcode->constraints & C_MAX_ELEM
5546 : aarch64_get_qualifier_esize (inst_dest.qualifier);
5548 /* If movprfx is predicated do some extra checks. */
5551 /* The instruction must be predicated. */
5552 if (inst_pred_idx < 0)
5554 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5555 mismatch_detail->error = _("predicated instruction expected "
5557 mismatch_detail->index = -1;
5558 mismatch_detail->non_fatal = true;
5563 /* The instruction must have a merging predicate. */
5564 if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
5566 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5567 mismatch_detail->error = _("merging predicate expected due "
5568 "to preceding `movprfx'");
5569 mismatch_detail->index = inst_pred_idx;
5570 mismatch_detail->non_fatal = true;
5575 /* The same register must be used in instruction. */
5576 if (blk_pred.reg.regno != inst_pred.reg.regno)
5578 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5579 mismatch_detail->error = _("predicate register differs "
5580 "from that in preceding "
5582 mismatch_detail->index = inst_pred_idx;
5583 mismatch_detail->non_fatal = true;
5589 /* Destructive operations by definition must allow one usage of the
5592 = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;
5594 /* Operand is not used at all. */
5595 if (num_op_used == 0)
5597 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5598 mismatch_detail->error = _("output register of preceding "
5599 "`movprfx' not used in current "
5601 mismatch_detail->index = 0;
5602 mismatch_detail->non_fatal = true;
5607 /* We now know it's used, now determine exactly where it's used. */
5608 if (blk_dest.reg.regno != inst_dest.reg.regno)
5610 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5611 mismatch_detail->error = _("output register of preceding "
5612 "`movprfx' expected as output");
5613 mismatch_detail->index = 0;
5614 mismatch_detail->non_fatal = true;
5619 /* Operand used more than allowed for the specific opcode type. */
5620 if (num_op_used > allowed_usage)
5622 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5623 mismatch_detail->error = _("output register of preceding "
5624 "`movprfx' used as input");
5625 mismatch_detail->index = last_op_usage;
5626 mismatch_detail->non_fatal = true;
5631 /* Now the only thing left is the qualifiers checks. The register
5632 must have the same maximum element size. */
5633 if (inst_dest.qualifier
5634 && blk_dest.qualifier
5635 && current_elem_size
5636 != aarch64_get_qualifier_esize (blk_dest.qualifier))
5638 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5639 mismatch_detail->error = _("register size not compatible with "
5640 "previous `movprfx'");
5641 mismatch_detail->index = 0;
5642 mismatch_detail->non_fatal = true;
5649 if (insn_sequence->num_added_insns == insn_sequence->num_allocated_insns)
5650 /* We've checked the last instruction in the sequence and so
5651 don't need the sequence any more. */
5652 init_insn_sequence (NULL, insn_sequence);
5654 add_insn_to_sequence (inst, insn_sequence);
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */
bool
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t svalue = uvalue;
  /* Mask of the bits above the ESIZE-byte element.  Two half-width shifts
     so that esize == 8 does not shift by the full 64-bit width (which
     would be undefined behavior).  */
  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);

  /* The bits above the element must be all-zeros or all-ones, otherwise
     the value is not a valid ESIZE-byte immediate at all.  */
  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
    return false;

  /* Narrow the value for as long as each half is a copy of the other.
     If it narrows all the way down to one byte, DUP can encode it for
     some element size, so DUPM is not needed.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      svalue = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  svalue = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return false;
	}
    }

  /* DUP also accepts a signed byte shifted left by 8; strip an all-zero
     low byte before the range test.  (Exact division: low 8 bits are 0.)  */
  if ((svalue & 0xff) == 0)
    svalue /= 256;

  /* Values representable as a (possibly shifted) signed byte are DUP-able;
     anything wider needs DUPM.  */
  return svalue < -128 || svalue >= 128;
}
5688 /* Return true if a CPU with the AARCH64_FEATURE_* bits in CPU_VARIANT
5689 supports the instruction described by INST. */
5692 aarch64_cpu_supports_inst_p (aarch64_feature_set cpu_variant,
5695 if (!inst->opcode->avariant
5696 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *inst->opcode->avariant))
5699 if (inst->opcode->iclass == sme_fp_sd
5700 && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D
5701 && !AARCH64_CPU_HAS_FEATURE (cpu_variant, SME_F64F64))
5704 if (inst->opcode->iclass == sme_int_sd
5705 && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D
5706 && !AARCH64_CPU_HAS_FEATURE (cpu_variant, SME_I16I64))
/* Include the opcode description table as well as the operand description
   table.  VERIFIER expands each table entry's verifier name to the
   corresponding verify_* function defined above.  */
#define VERIFIER(x) verify_##x
#include "aarch64-tbl.h"