1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
3 Copyright (C) 2009-2019 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GAS.
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
25 #include "bfd_stdint.h"
27 #include "safe-ctype.h"
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
36 #include "dwarf2dbg.h"
38 /* Types of processor to assemble for. */
40 #define CPU_DEFAULT AARCH64_ARCH_V8
43 #define streq(a, b) (strcmp (a, b) == 0)
45 #define END_OF_INSN '\0'
47 static aarch64_feature_set cpu_variant;
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
65 /* Which ABI to use. */
74 #define DEFAULT_ARCH "aarch64"
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
102 /* Bits for DEFINED field in vector_type_el. */
103 #define NTA_HASTYPE 1
104 #define NTA_HASINDEX 2
105 #define NTA_HASVARWIDTH 4
107 struct vector_type_el
109 enum vector_el_type type;
110 unsigned char defined;
115 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
119 bfd_reloc_code_real_type type;
122 enum aarch64_opnd opnd;
124 unsigned need_libopcodes_p : 1;
127 struct aarch64_instruction
129 /* libopcodes structure for instruction intermediate representation. */
131 /* Record assembly errors found during the parsing. */
134 enum aarch64_operand_error_kind kind;
137 /* The condition that appears in the assembly line. */
139 /* Relocation information (including the GAS internal fixup). */
141 /* Need to generate an immediate in the literal pool. */
142 unsigned gen_lit_pool : 1;
145 typedef struct aarch64_instruction aarch64_instruction;
147 static aarch64_instruction inst;
149 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
150 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
153 # define now_instr_sequence seg_info \
154 (now_seg)->tc_segment_info_data.insn_sequence
156 static struct aarch64_instr_sequence now_instr_sequence;
159 /* Diagnostics inline function utilities.
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s), in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
179 inst.parsing_error.kind = AARCH64_OPDE_NIL;
180 inst.parsing_error.error = NULL;
183 static inline bfd_boolean
186 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
189 static inline const char *
190 get_error_message (void)
192 return inst.parsing_error.error;
195 static inline enum aarch64_operand_error_kind
196 get_error_kind (void)
198 return inst.parsing_error.kind;
202 set_error (enum aarch64_operand_error_kind kind, const char *error)
204 inst.parsing_error.kind = kind;
205 inst.parsing_error.error = error;
209 set_recoverable_error (const char *error)
211 set_error (AARCH64_OPDE_RECOVERABLE, error);
214 /* Use the DESC field of the corresponding aarch64_operand entry to compose
215 the error message. */
217 set_default_error (void)
219 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
223 set_syntax_error (const char *error)
225 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
229 set_first_syntax_error (const char *error)
232 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
236 set_fatal_syntax_error (const char *error)
238 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
241 /* Number of littlenums required to hold an extended precision number. */
242 #define MAX_LITTLENUMS 6
244 /* Return value for certain parsers when the parsing fails; those parsers
245 return the information of the parsed result, e.g. register number, on
247 #define PARSE_FAIL -1
249 /* This is an invalid condition code that means no conditional field is
251 #define COND_ALWAYS 0x10
255 const char *template;
261 const char *template;
268 bfd_reloc_code_real_type reloc;
271 /* Macros to define the register types and masks for the purpose
274 #undef AARCH64_REG_TYPES
275 #define AARCH64_REG_TYPES \
276 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
277 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
278 BASIC_REG_TYPE(SP_32) /* wsp */ \
279 BASIC_REG_TYPE(SP_64) /* sp */ \
280 BASIC_REG_TYPE(Z_32) /* wzr */ \
281 BASIC_REG_TYPE(Z_64) /* xzr */ \
282 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
283 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
284 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
285 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
286 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
287 BASIC_REG_TYPE(VN) /* v[0-31] */ \
288 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
289 BASIC_REG_TYPE(PN) /* p[0-15] */ \
290 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
291 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
292 /* Typecheck: same, plus SVE registers. */ \
293 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
295 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
296 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
298 /* Typecheck: same, plus SVE registers. */ \
299 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
300 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
302 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
303 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
304 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
305 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
306 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
307 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
308 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
309 /* Typecheck: any [BHSDQ]P FP. */ \
310 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
311 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
312 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
313 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
314 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
315 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
316 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
317 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
318 be used for SVE instructions, since Zn and Pn are valid symbols \
319 in other contexts. */ \
320 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
321 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
322 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
323 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
324 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
325 | REG_TYPE(ZN) | REG_TYPE(PN)) \
326 /* Any integer register; used for error messages only. */ \
327 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
328 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
329 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
330 /* Pseudo type to mark the end of the enumerator sequence. */ \
333 #undef BASIC_REG_TYPE
334 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
335 #undef MULTI_REG_TYPE
336 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
338 /* Register type enumerators. */
339 typedef enum aarch64_reg_type_
341 /* A list of REG_TYPE_*. */
345 #undef BASIC_REG_TYPE
346 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
348 #define REG_TYPE(T) (1 << REG_TYPE_##T)
349 #undef MULTI_REG_TYPE
350 #define MULTI_REG_TYPE(T,V) V,
352 /* Structure for a hash table entry for a register. */
356 unsigned char number;
357 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
358 unsigned char builtin;
361 /* Values indexed by aarch64_reg_type to assist the type checking. */
362 static const unsigned reg_type_masks[] =
367 #undef BASIC_REG_TYPE
369 #undef MULTI_REG_TYPE
370 #undef AARCH64_REG_TYPES
372 /* Diagnostics used when we don't get a register of the expected type.
373 Note: this has to synchronized with aarch64_reg_type definitions
376 get_reg_expected_msg (aarch64_reg_type reg_type)
383 msg = N_("integer 32-bit register expected");
386 msg = N_("integer 64-bit register expected");
389 msg = N_("integer register expected");
391 case REG_TYPE_R64_SP:
392 msg = N_("64-bit integer or SP register expected");
394 case REG_TYPE_SVE_BASE:
395 msg = N_("base register expected");
398 msg = N_("integer or zero register expected");
400 case REG_TYPE_SVE_OFFSET:
401 msg = N_("offset register expected");
404 msg = N_("integer or SP register expected");
406 case REG_TYPE_R_Z_SP:
407 msg = N_("integer, zero or SP register expected");
410 msg = N_("8-bit SIMD scalar register expected");
413 msg = N_("16-bit SIMD scalar or floating-point half precision "
414 "register expected");
417 msg = N_("32-bit SIMD scalar or floating-point single precision "
418 "register expected");
421 msg = N_("64-bit SIMD scalar or floating-point double precision "
422 "register expected");
425 msg = N_("128-bit SIMD scalar or floating-point quad precision "
426 "register expected");
428 case REG_TYPE_R_Z_BHSDQ_V:
429 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
430 msg = N_("register expected");
432 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
433 msg = N_("SIMD scalar or floating-point register expected");
435 case REG_TYPE_VN: /* any V reg */
436 msg = N_("vector register expected");
439 msg = N_("SVE vector register expected");
442 msg = N_("SVE predicate register expected");
445 as_fatal (_("invalid register type %d"), reg_type);
450 /* Some well known registers that we refer to directly elsewhere. */
454 /* Instructions take 4 bytes in the object file. */
457 static struct hash_control *aarch64_ops_hsh;
458 static struct hash_control *aarch64_cond_hsh;
459 static struct hash_control *aarch64_shift_hsh;
460 static struct hash_control *aarch64_sys_regs_hsh;
461 static struct hash_control *aarch64_pstatefield_hsh;
462 static struct hash_control *aarch64_sys_regs_ic_hsh;
463 static struct hash_control *aarch64_sys_regs_dc_hsh;
464 static struct hash_control *aarch64_sys_regs_at_hsh;
465 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
466 static struct hash_control *aarch64_sys_regs_sr_hsh;
467 static struct hash_control *aarch64_reg_hsh;
468 static struct hash_control *aarch64_barrier_opt_hsh;
469 static struct hash_control *aarch64_nzcv_hsh;
470 static struct hash_control *aarch64_pldop_hsh;
471 static struct hash_control *aarch64_hint_opt_hsh;
473 /* Stuff needed to resolve the label ambiguity
482 static symbolS *last_label_seen;
484 /* Literal pool structure. Held on a per-section
485 and per-sub-section basis. */
487 #define MAX_LITERAL_POOL_SIZE 1024
488 typedef struct literal_expression
491 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
492 LITTLENUM_TYPE * bignum;
493 } literal_expression;
495 typedef struct literal_pool
497 literal_expression literals[MAX_LITERAL_POOL_SIZE];
498 unsigned int next_free_entry;
504 struct literal_pool *next;
507 /* Pointer to a linked list of literal pools. */
508 static literal_pool *list_of_pools = NULL;
512 /* This array holds the chars that always start a comment. If the
513 pre-processor is disabled, these aren't very useful. */
514 const char comment_chars[] = "";
516 /* This array holds the chars that only start a comment at the beginning of
517 a line. If the line seems to have the form '# 123 filename'
518 .line and .file directives will appear in the pre-processed output. */
519 /* Note that input_file.c hand checks for '#' at the beginning of the
520 first line of the input file. This is because the compiler outputs
521 #NO_APP at the beginning of its output. */
522 /* Also note that comments like this one will always work. */
523 const char line_comment_chars[] = "#";
525 const char line_separator_chars[] = ";";
527 /* Chars that can be used to separate mant
528 from exp in floating point numbers. */
529 const char EXP_CHARS[] = "eE";
531 /* Chars that mean this number is a floating point constant. */
535 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
537 /* Prefix character that indicates the start of an immediate value. */
538 #define is_immediate_prefix(C) ((C) == '#')
540 /* Separator character handling. */
542 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
544 static inline bfd_boolean
545 skip_past_char (char **str, char c)
556 #define skip_past_comma(str) skip_past_char (str, ',')
558 /* Arithmetic expressions (possibly involving symbols). */
560 static bfd_boolean in_my_get_expression_p = FALSE;
562 /* Third argument to my_get_expression. */
563 #define GE_NO_PREFIX 0
564 #define GE_OPT_PREFIX 1
566 /* Return TRUE if the string pointed by *STR is successfully parsed
567 as an valid expression; *EP will be filled with the information of
568 such an expression. Otherwise return FALSE. */
571 my_get_expression (expressionS * ep, char **str, int prefix_mode,
576 int prefix_present_p = 0;
583 if (is_immediate_prefix (**str))
586 prefix_present_p = 1;
593 memset (ep, 0, sizeof (expressionS));
595 save_in = input_line_pointer;
596 input_line_pointer = *str;
597 in_my_get_expression_p = TRUE;
598 seg = expression (ep);
599 in_my_get_expression_p = FALSE;
601 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
603 /* We found a bad expression in md_operand(). */
604 *str = input_line_pointer;
605 input_line_pointer = save_in;
606 if (prefix_present_p && ! error_p ())
607 set_fatal_syntax_error (_("bad expression"));
609 set_first_syntax_error (_("bad expression"));
614 if (seg != absolute_section
615 && seg != text_section
616 && seg != data_section
617 && seg != bss_section && seg != undefined_section)
619 set_syntax_error (_("bad segment"));
620 *str = input_line_pointer;
621 input_line_pointer = save_in;
628 *str = input_line_pointer;
629 input_line_pointer = save_in;
633 /* Turn a string in input_line_pointer into a floating point constant
634 of type TYPE, and store the appropriate bytes in *LITP. The number
635 of LITTLENUMS emitted is stored in *SIZEP. An error message is
636 returned, or NULL on OK. */
639 md_atof (int type, char *litP, int *sizeP)
641 return ieee_md_atof (type, litP, sizeP, target_big_endian);
644 /* We handle all bad expressions here, so that we can report the faulty
645 instruction in the error message. */
647 md_operand (expressionS * exp)
649 if (in_my_get_expression_p)
650 exp->X_op = O_illegal;
653 /* Immediate values. */
655 /* Errors may be set multiple times during parsing or bit encoding
656 (particularly in the Neon bits), but usually the earliest error which is set
657 will be the most meaningful. Avoid overwriting it with later (cascading)
658 errors by calling this function. */
661 first_error (const char *error)
664 set_syntax_error (error);
667 /* Similar to first_error, but this function accepts formatted error
670 first_error_fmt (const char *format, ...)
675 /* N.B. this single buffer will not cause error messages for different
676 instructions to pollute each other; this is because at the end of
677 processing of each assembly line, error message if any will be
678 collected by as_bad. */
679 static char buffer[size];
683 int ret ATTRIBUTE_UNUSED;
684 va_start (args, format);
685 ret = vsnprintf (buffer, size, format, args);
686 know (ret <= size - 1 && ret >= 0);
688 set_syntax_error (buffer);
692 /* Register parsing. */
694 /* Generic register parser which is called by other specialized
696 CCP points to what should be the beginning of a register name.
697 If it is indeed a valid register name, advance CCP over it and
698 return the reg_entry structure; otherwise return NULL.
699 It does not issue diagnostics. */
702 parse_reg (char **ccp)
708 #ifdef REGISTER_PREFIX
709 if (*start != REGISTER_PREFIX)
715 if (!ISALPHA (*p) || !is_name_beginner (*p))
720 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
722 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
731 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
734 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
736 return (reg_type_masks[type] & (1 << reg->type)) != 0;
739 /* Try to parse a base or offset register. Allow SVE base and offset
740 registers if REG_TYPE includes SVE registers. Return the register
741 entry on success, setting *QUALIFIER to the register qualifier.
742 Return null otherwise.
744 Note that this function does not issue any diagnostics. */
746 static const reg_entry *
747 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
748 aarch64_opnd_qualifier_t *qualifier)
751 const reg_entry *reg = parse_reg (&str);
761 *qualifier = AARCH64_OPND_QLF_W;
767 *qualifier = AARCH64_OPND_QLF_X;
771 if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
774 switch (TOLOWER (str[1]))
777 *qualifier = AARCH64_OPND_QLF_S_S;
780 *qualifier = AARCH64_OPND_QLF_S_D;
797 /* Try to parse a base or offset register. Return the register entry
798 on success, setting *QUALIFIER to the register qualifier. Return null
801 Note that this function does not issue any diagnostics. */
803 static const reg_entry *
804 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
806 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
809 /* Parse the qualifier of a vector register or vector element of type
810 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
811 succeeds; otherwise return FALSE.
813 Accept only one occurrence of:
814 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
817 parse_vector_type_for_operand (aarch64_reg_type reg_type,
818 struct vector_type_el *parsed_type, char **str)
822 unsigned element_size;
823 enum vector_el_type type;
826 gas_assert (*ptr == '.');
829 if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
834 width = strtoul (ptr, &ptr, 10);
835 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
837 first_error_fmt (_("bad size %d in vector width specifier"), width);
842 switch (TOLOWER (*ptr))
861 if (reg_type == REG_TYPE_ZN || width == 1)
870 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
872 first_error (_("missing element size"));
875 if (width != 0 && width * element_size != 64
876 && width * element_size != 128
877 && !(width == 2 && element_size == 16)
878 && !(width == 4 && element_size == 8))
881 ("invalid element size %d and vector size combination %c"),
887 parsed_type->type = type;
888 parsed_type->width = width;
895 /* *STR contains an SVE zero/merge predication suffix. Parse it into
896 *PARSED_TYPE and point *STR at the end of the suffix. */
899 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
904 gas_assert (*ptr == '/');
906 switch (TOLOWER (*ptr))
909 parsed_type->type = NT_zero;
912 parsed_type->type = NT_merge;
915 if (*ptr != '\0' && *ptr != ',')
916 first_error_fmt (_("unexpected character `%c' in predication type"),
919 first_error (_("missing predication type"));
922 parsed_type->width = 0;
927 /* Parse a register of the type TYPE.
929 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
930 name or the parsed register is not of TYPE.
932 Otherwise return the register number, and optionally fill in the actual
933 type of the register in *RTYPE when multiple alternatives were given, and
934 return the register shape and element index information in *TYPEINFO.
936 IN_REG_LIST should be set with TRUE if the caller is parsing a register
940 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
941 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
944 const reg_entry *reg = parse_reg (&str);
945 struct vector_type_el atype;
946 struct vector_type_el parsetype;
947 bfd_boolean is_typed_vecreg = FALSE;
950 atype.type = NT_invtype;
958 set_default_error ();
962 if (! aarch64_check_reg_type (reg, type))
964 DEBUG_TRACE ("reg type check failed");
965 set_default_error ();
970 if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
971 && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
975 if (!parse_vector_type_for_operand (type, &parsetype, &str))
980 if (!parse_predication_for_operand (&parsetype, &str))
984 /* Register if of the form Vn.[bhsdq]. */
985 is_typed_vecreg = TRUE;
987 if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
989 /* The width is always variable; we don't allow an integer width
991 gas_assert (parsetype.width == 0);
992 atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
994 else if (parsetype.width == 0)
995 /* Expect index. In the new scheme we cannot have
996 Vn.[bhsdq] represent a scalar. Therefore any
997 Vn.[bhsdq] should have an index following it.
998 Except in reglists of course. */
999 atype.defined |= NTA_HASINDEX;
1001 atype.defined |= NTA_HASTYPE;
1003 atype.type = parsetype.type;
1004 atype.width = parsetype.width;
1007 if (skip_past_char (&str, '['))
1011 /* Reject Sn[index] syntax. */
1012 if (!is_typed_vecreg)
1014 first_error (_("this type of register can't be indexed"));
1020 first_error (_("index not allowed inside register list"));
1024 atype.defined |= NTA_HASINDEX;
1026 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1028 if (exp.X_op != O_constant)
1030 first_error (_("constant expression required"));
1034 if (! skip_past_char (&str, ']'))
1037 atype.index = exp.X_add_number;
1039 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
1041 /* Indexed vector register expected. */
1042 first_error (_("indexed vector register expected"));
1046 /* A vector reg Vn should be typed or indexed. */
1047 if (type == REG_TYPE_VN && atype.defined == 0)
1049 first_error (_("invalid use of vector register"));
1065 Return the register number on success; return PARSE_FAIL otherwise.
1067 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1068 the register (e.g. NEON double or quad reg when either has been requested).
1070 If this is a NEON vector register with additional type information, fill
1071 in the struct pointed to by VECTYPE (if non-NULL).
1073 This parser does not handle register list. */
1076 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1077 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1079 struct vector_type_el atype;
1081 int reg = parse_typed_reg (&str, type, rtype, &atype,
1082 /*in_reg_list= */ FALSE);
1084 if (reg == PARSE_FAIL)
1095 static inline bfd_boolean
1096 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1100 && e1.defined == e2.defined
1101 && e1.width == e2.width && e1.index == e2.index;
1104 /* This function parses a list of vector registers of type TYPE.
1105 On success, it returns the parsed register list information in the
1106 following encoded format:
1108 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1109 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1111 The information of the register shape and/or index is returned in
1114 It returns PARSE_FAIL if the register list is invalid.
1116 The list contains one to four registers.
1117 Each register can be one of:
1120 All <T> should be identical.
1121 All <index> should be identical.
1122 There are restrictions on <Vt> numbers which are checked later
1123 (by reg_list_valid_p). */
1126 parse_vector_reg_list (char **ccp, aarch64_reg_type type,
1127 struct vector_type_el *vectype)
1131 struct vector_type_el typeinfo, typeinfo_first;
1136 bfd_boolean error = FALSE;
1137 bfd_boolean expect_index = FALSE;
1141 set_syntax_error (_("expecting {"));
1147 typeinfo_first.defined = 0;
1148 typeinfo_first.type = NT_invtype;
1149 typeinfo_first.width = -1;
1150 typeinfo_first.index = 0;
1159 str++; /* skip over '-' */
1162 val = parse_typed_reg (&str, type, NULL, &typeinfo,
1163 /*in_reg_list= */ TRUE);
1164 if (val == PARSE_FAIL)
1166 set_first_syntax_error (_("invalid vector register in list"));
1170 /* reject [bhsd]n */
1171 if (type == REG_TYPE_VN && typeinfo.defined == 0)
1173 set_first_syntax_error (_("invalid scalar register in list"));
1178 if (typeinfo.defined & NTA_HASINDEX)
1179 expect_index = TRUE;
1183 if (val < val_range)
1185 set_first_syntax_error
1186 (_("invalid range in vector register list"));
1195 typeinfo_first = typeinfo;
1196 else if (! eq_vector_type_el (typeinfo_first, typeinfo))
1198 set_first_syntax_error
1199 (_("type mismatch in vector register list"));
1204 for (i = val_range; i <= val; i++)
1206 ret_val |= i << (5 * nb_regs);
1211 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1213 skip_whitespace (str);
1216 set_first_syntax_error (_("end of vector register list not found"));
1221 skip_whitespace (str);
1225 if (skip_past_char (&str, '['))
1229 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1230 if (exp.X_op != O_constant)
1232 set_first_syntax_error (_("constant expression required."));
1235 if (! skip_past_char (&str, ']'))
1238 typeinfo_first.index = exp.X_add_number;
1242 set_first_syntax_error (_("expected index"));
1249 set_first_syntax_error (_("too many registers in vector register list"));
1252 else if (nb_regs == 0)
1254 set_first_syntax_error (_("empty vector register list"));
1260 *vectype = typeinfo_first;
1262 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1265 /* Directives: register aliases. */
1268 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1273 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1276 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1279 /* Only warn about a redefinition if it's not defined as the
1281 else if (new->number != number || new->type != type)
1282 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1287 name = xstrdup (str);
1288 new = XNEW (reg_entry);
1291 new->number = number;
1293 new->builtin = FALSE;
1295 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1301 /* Look for the .req directive. This is of the form:
1303 new_register_name .req existing_register_name
1305 If we find one, or if it looks sufficiently like one that we want to
1306 handle any error here, return TRUE. Otherwise return FALSE. */
1309 create_register_alias (char *newname, char *p)
1311 const reg_entry *old;
1312 char *oldname, *nbuf;
1315 /* The input scrubber ensures that whitespace after the mnemonic is
1316 collapsed to single spaces. */
1318 if (strncmp (oldname, " .req ", 6) != 0)
1322 if (*oldname == '\0')
1325 old = hash_find (aarch64_reg_hsh, oldname);
1328 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1332 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1333 the desired alias name, and p points to its end. If not, then
1334 the desired alias name is in the global original_case_string. */
1335 #ifdef TC_CASE_SENSITIVE
1338 newname = original_case_string;
1339 nlen = strlen (newname);
1342 nbuf = xmemdup0 (newname, nlen);
1344 /* Create aliases under the new name as stated; an all-lowercase
1345 version of the new name; and an all-uppercase version of the new
1347 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1349 for (p = nbuf; *p; p++)
1352 if (strncmp (nbuf, newname, nlen))
1354 /* If this attempt to create an additional alias fails, do not bother
1355 trying to create the all-lower case alias. We will fail and issue
1356 a second, duplicate error message. This situation arises when the
1357 programmer does something like:
1360 The second .req creates the "Foo" alias but then fails to create
1361 the artificial FOO alias because it has already been created by the
1363 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1370 for (p = nbuf; *p; p++)
1373 if (strncmp (nbuf, newname, nlen))
1374 insert_reg_alias (nbuf, old->number, old->type);
1381 /* Should never be called, as .req goes between the alias and the
1382 register name, not at the beginning of the line. */
1384 s_req (int a ATTRIBUTE_UNUSED)
1386 as_bad (_("invalid syntax for .req directive"));
1389 /* The .unreq directive deletes an alias which was previously defined
1390 by .req. For example:
1396 s_unreq (int a ATTRIBUTE_UNUSED)
1401 name = input_line_pointer;
1403 while (*input_line_pointer != 0
1404 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1405 ++input_line_pointer;
1407 saved_char = *input_line_pointer;
1408 *input_line_pointer = 0;
1411 as_bad (_("invalid syntax for .unreq directive"));
1414 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1417 as_bad (_("unknown register alias '%s'"), name);
1418 else if (reg->builtin)
1419 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1426 hash_delete (aarch64_reg_hsh, name, FALSE);
1427 free ((char *) reg->name);
1430 /* Also locate the all upper case and all lower case versions.
1431 Do not complain if we cannot find one or the other as it
1432 was probably deleted above. */
1434 nbuf = strdup (name);
1435 for (p = nbuf; *p; p++)
1437 reg = hash_find (aarch64_reg_hsh, nbuf);
1440 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1441 free ((char *) reg->name);
1445 for (p = nbuf; *p; p++)
1447 reg = hash_find (aarch64_reg_hsh, nbuf);
1450 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1451 free ((char *) reg->name);
1459 *input_line_pointer = saved_char;
1460 demand_empty_rest_of_line ();
1463 /* Directives: Instruction set selection. */
1466 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1467 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1468 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1469 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1471 /* Create a new mapping symbol for the transition to STATE. */
1474 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1477 const char *symname;
1484 type = BSF_NO_FLAGS;
1488 type = BSF_NO_FLAGS;
1494 symbolP = symbol_new (symname, now_seg, value, frag);
1495 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1497 /* Save the mapping symbols for future reference. Also check that
1498 we do not place two mapping symbols at the same offset within a
1499 frag. We'll handle overlap between frags in
1500 check_mapping_symbols.
1502 If .fill or other data filling directive generates zero sized data,
1503 the mapping symbol for the following code will have the same value
1504 as the one generated for the data filling directive. In this case,
1505 we replace the old symbol with the new one at the same address. */
1508 if (frag->tc_frag_data.first_map != NULL)
1510 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1511 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1514 frag->tc_frag_data.first_map = symbolP;
1516 if (frag->tc_frag_data.last_map != NULL)
1518 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1519 S_GET_VALUE (symbolP));
1520 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1521 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1524 frag->tc_frag_data.last_map = symbolP;
1527 /* We must sometimes convert a region marked as code to data during
1528 code alignment, if an odd number of bytes have to be padded. The
1529 code mapping symbol is pushed to an aligned address. */
1532 insert_data_mapping_symbol (enum mstate state,
1533 valueT value, fragS * frag, offsetT bytes)
1535 /* If there was already a mapping symbol, remove it. */
1536 if (frag->tc_frag_data.last_map != NULL
1537 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1538 frag->fr_address + value)
1540 symbolS *symp = frag->tc_frag_data.last_map;
1544 know (frag->tc_frag_data.first_map == symp);
1545 frag->tc_frag_data.first_map = NULL;
1547 frag->tc_frag_data.last_map = NULL;
1548 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1551 make_mapping_symbol (MAP_DATA, value, frag);
1552 make_mapping_symbol (state, value + bytes, frag);
1555 static void mapping_state_2 (enum mstate state, int max_chars);
1557 /* Set the mapping state to STATE. Only call this when about to
1558 emit some STATE bytes to the file. */
1561 mapping_state (enum mstate state)
1563 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1565 if (state == MAP_INSN)
1566 /* AArch64 instructions require 4-byte alignment. When emitting
1567 instructions into any section, record the appropriate section
1569 record_alignment (now_seg, 2);
1571 if (mapstate == state)
1572 /* The mapping symbol has already been emitted.
1573 There is nothing else to do. */
1576 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1577 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1578 /* Emit MAP_DATA within executable section in order. Otherwise, it will be
1579 evaluated later in the next else. */
1581 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1583 /* Only add the symbol if the offset is > 0:
1584 if we're at the first frag, check it's size > 0;
1585 if we're not at the first frag, then for sure
1586 the offset is > 0. */
1587 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1588 const int add_symbol = (frag_now != frag_first)
1589 || (frag_now_fix () > 0);
1592 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1596 mapping_state_2 (state, 0);
1599 /* Same as mapping_state, but MAX_CHARS bytes have already been
1600 allocated. Put the mapping symbol that far back. */
1603 mapping_state_2 (enum mstate state, int max_chars)
1605 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1607 if (!SEG_NORMAL (now_seg))
1610 if (mapstate == state)
1611 /* The mapping symbol has already been emitted.
1612 There is nothing else to do. */
1615 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1616 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1619 #define mapping_state(x) /* nothing */
1620 #define mapping_state_2(x, y) /* nothing */
1623 /* Directives: sectioning and alignment. */
1626 s_bss (int ignore ATTRIBUTE_UNUSED)
1628 /* We don't support putting frags in the BSS segment, we fake it by
1629 marking in_bss, then looking at s_skip for clues. */
1630 subseg_set (bss_section, 0);
1631 demand_empty_rest_of_line ();
1632 mapping_state (MAP_DATA);
1636 s_even (int ignore ATTRIBUTE_UNUSED)
1638 /* Never make frag if expect extra pass. */
1640 frag_align (1, 0, 0);
1642 record_alignment (now_seg, 1);
1644 demand_empty_rest_of_line ();
1647 /* Directives: Literal pools. */
1649 static literal_pool *
1650 find_literal_pool (int size)
1654 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1656 if (pool->section == now_seg
1657 && pool->sub_section == now_subseg && pool->size == size)
1664 static literal_pool *
1665 find_or_make_literal_pool (int size)
1667 /* Next literal pool ID number. */
1668 static unsigned int latest_pool_num = 1;
1671 pool = find_literal_pool (size);
1675 /* Create a new pool. */
1676 pool = XNEW (literal_pool);
1680 /* Currently we always put the literal pool in the current text
1681 section. If we were generating "small" model code where we
1682 knew that all code and initialised data was within 1MB then
1683 we could output literals to mergeable, read-only data
1686 pool->next_free_entry = 0;
1687 pool->section = now_seg;
1688 pool->sub_section = now_subseg;
1690 pool->next = list_of_pools;
1691 pool->symbol = NULL;
1693 /* Add it to the list. */
1694 list_of_pools = pool;
1697 /* New pools, and emptied pools, will have a NULL symbol. */
1698 if (pool->symbol == NULL)
1700 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1701 (valueT) 0, &zero_address_frag);
1702 pool->id = latest_pool_num++;
1709 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1710 Return TRUE on success, otherwise return FALSE. */
1712 add_to_lit_pool (expressionS *exp, int size)
1717 pool = find_or_make_literal_pool (size);
1719 /* Check if this literal value is already in the pool. */
1720 for (entry = 0; entry < pool->next_free_entry; entry++)
1722 expressionS * litexp = & pool->literals[entry].exp;
1724 if ((litexp->X_op == exp->X_op)
1725 && (exp->X_op == O_constant)
1726 && (litexp->X_add_number == exp->X_add_number)
1727 && (litexp->X_unsigned == exp->X_unsigned))
1730 if ((litexp->X_op == exp->X_op)
1731 && (exp->X_op == O_symbol)
1732 && (litexp->X_add_number == exp->X_add_number)
1733 && (litexp->X_add_symbol == exp->X_add_symbol)
1734 && (litexp->X_op_symbol == exp->X_op_symbol))
1738 /* Do we need to create a new entry? */
1739 if (entry == pool->next_free_entry)
1741 if (entry >= MAX_LITERAL_POOL_SIZE)
1743 set_syntax_error (_("literal pool overflow"));
1747 pool->literals[entry].exp = *exp;
1748 pool->next_free_entry += 1;
1749 if (exp->X_op == O_big)
1751 /* PR 16688: Bignums are held in a single global array. We must
1752 copy and preserve that value now, before it is overwritten. */
1753 pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
1755 memcpy (pool->literals[entry].bignum, generic_bignum,
1756 CHARS_PER_LITTLENUM * exp->X_add_number);
1759 pool->literals[entry].bignum = NULL;
1762 exp->X_op = O_symbol;
1763 exp->X_add_number = ((int) entry) * size;
1764 exp->X_add_symbol = pool->symbol;
1769 /* Can't use symbol_new here, so have to create a symbol and then at
1770 a later date assign it a value. That's what these functions do. */
1773 symbol_locate (symbolS * symbolP,
1774 const char *name,/* It is copied, the caller can modify. */
1775 segT segment, /* Segment identifier (SEG_<something>). */
1776 valueT valu, /* Symbol value. */
1777 fragS * frag) /* Associated fragment. */
1780 char *preserved_copy_of_name;
1782 name_length = strlen (name) + 1; /* +1 for \0. */
1783 obstack_grow (¬es, name, name_length);
1784 preserved_copy_of_name = obstack_finish (¬es);
1786 #ifdef tc_canonicalize_symbol_name
1787 preserved_copy_of_name =
1788 tc_canonicalize_symbol_name (preserved_copy_of_name);
1791 S_SET_NAME (symbolP, preserved_copy_of_name);
1793 S_SET_SEGMENT (symbolP, segment);
1794 S_SET_VALUE (symbolP, valu);
1795 symbol_clear_list_pointers (symbolP);
1797 symbol_set_frag (symbolP, frag);
1799 /* Link to end of symbol chain. */
1801 extern int symbol_table_frozen;
1803 if (symbol_table_frozen)
1807 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1809 obj_symbol_new_hook (symbolP);
1811 #ifdef tc_symbol_new_hook
1812 tc_symbol_new_hook (symbolP);
1816 verify_symbol_chain (symbol_rootP, symbol_lastP);
1817 #endif /* DEBUG_SYMS */
1822 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1829 for (align = 2; align <= 4; align++)
1831 int size = 1 << align;
1833 pool = find_literal_pool (size);
1834 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1837 /* Align pool as you have word accesses.
1838 Only make a frag if we have to. */
1840 frag_align (align, 0, 0);
1842 mapping_state (MAP_DATA);
1844 record_alignment (now_seg, align);
1846 sprintf (sym_name, "$$lit_\002%x", pool->id);
1848 symbol_locate (pool->symbol, sym_name, now_seg,
1849 (valueT) frag_now_fix (), frag_now);
1850 symbol_table_insert (pool->symbol);
1852 for (entry = 0; entry < pool->next_free_entry; entry++)
1854 expressionS * exp = & pool->literals[entry].exp;
1856 if (exp->X_op == O_big)
1858 /* PR 16688: Restore the global bignum value. */
1859 gas_assert (pool->literals[entry].bignum != NULL);
1860 memcpy (generic_bignum, pool->literals[entry].bignum,
1861 CHARS_PER_LITTLENUM * exp->X_add_number);
1864 /* First output the expression in the instruction to the pool. */
1865 emit_expr (exp, size); /* .word|.xword */
1867 if (exp->X_op == O_big)
1869 free (pool->literals[entry].bignum);
1870 pool->literals[entry].bignum = NULL;
1874 /* Mark the pool as empty. */
1875 pool->next_free_entry = 0;
1876 pool->symbol = NULL;
1881 /* Forward declarations for functions below, in the MD interface
1883 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1884 static struct reloc_table_entry * find_reloc_table_entry (char **);
1886 /* Directives: Data. */
1887 /* N.B. the support for relocation suffix in this directive needs to be
1888 implemented properly. */
1891 s_aarch64_elf_cons (int nbytes)
1895 #ifdef md_flush_pending_output
1896 md_flush_pending_output ();
1899 if (is_it_end_of_statement ())
1901 demand_empty_rest_of_line ();
1905 #ifdef md_cons_align
1906 md_cons_align (nbytes);
1909 mapping_state (MAP_DATA);
1912 struct reloc_table_entry *reloc;
1916 if (exp.X_op != O_symbol)
1917 emit_expr (&exp, (unsigned int) nbytes);
1920 skip_past_char (&input_line_pointer, '#');
1921 if (skip_past_char (&input_line_pointer, ':'))
1923 reloc = find_reloc_table_entry (&input_line_pointer);
1925 as_bad (_("unrecognized relocation suffix"));
1927 as_bad (_("unimplemented relocation suffix"));
1928 ignore_rest_of_line ();
1932 emit_expr (&exp, (unsigned int) nbytes);
1935 while (*input_line_pointer++ == ',');
1937 /* Put terminator back into stream. */
1938 input_line_pointer--;
1939 demand_empty_rest_of_line ();
1942 #endif /* OBJ_ELF */
1944 /* Output a 32-bit word, but mark as an instruction. */
1947 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1951 #ifdef md_flush_pending_output
1952 md_flush_pending_output ();
1955 if (is_it_end_of_statement ())
1957 demand_empty_rest_of_line ();
1961 /* Sections are assumed to start aligned. In executable section, there is no
1962 MAP_DATA symbol pending. So we only align the address during
1963 MAP_DATA --> MAP_INSN transition.
1964 For other sections, this is not guaranteed. */
1965 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1966 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
1967 frag_align_code (2, 0);
1970 mapping_state (MAP_INSN);
1976 if (exp.X_op != O_constant)
1978 as_bad (_("constant expression required"));
1979 ignore_rest_of_line ();
1983 if (target_big_endian)
1985 unsigned int val = exp.X_add_number;
1986 exp.X_add_number = SWAP_32 (val);
1988 emit_expr (&exp, 4);
1990 while (*input_line_pointer++ == ',');
1992 /* Put terminator back into stream. */
1993 input_line_pointer--;
1994 demand_empty_rest_of_line ();
1998 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2000 demand_empty_rest_of_line ();
2001 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2002 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2006 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2009 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2015 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2016 BFD_RELOC_AARCH64_TLSDESC_ADD);
2018 demand_empty_rest_of_line ();
2021 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2024 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2028 /* Since we're just labelling the code, there's no need to define a
2031 /* Make sure there is enough room in this frag for the following
2032 blr. This trick only works if the blr follows immediately after
2033 the .tlsdesc directive. */
2035 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2036 BFD_RELOC_AARCH64_TLSDESC_CALL);
2038 demand_empty_rest_of_line ();
2041 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2044 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2050 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2051 BFD_RELOC_AARCH64_TLSDESC_LDR);
2053 demand_empty_rest_of_line ();
2055 #endif /* OBJ_ELF */
2057 static void s_aarch64_arch (int);
2058 static void s_aarch64_cpu (int);
2059 static void s_aarch64_arch_extension (int);
2061 /* This table describes all the machine specific pseudo-ops the assembler
2062 has to support. The fields are:
2063 pseudo-op name without dot
2064 function to call to execute this pseudo-op
2065 Integer arg to pass to the function. */
2067 const pseudo_typeS md_pseudo_table[] = {
2068 /* Never called because '.req' does not start a line. */
2070 {"unreq", s_unreq, 0},
2072 {"even", s_even, 0},
2073 {"ltorg", s_ltorg, 0},
2074 {"pool", s_ltorg, 0},
2075 {"cpu", s_aarch64_cpu, 0},
2076 {"arch", s_aarch64_arch, 0},
2077 {"arch_extension", s_aarch64_arch_extension, 0},
2078 {"inst", s_aarch64_inst, 0},
2079 {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
2081 {"tlsdescadd", s_tlsdescadd, 0},
2082 {"tlsdesccall", s_tlsdesccall, 0},
2083 {"tlsdescldr", s_tlsdescldr, 0},
2084 {"word", s_aarch64_elf_cons, 4},
2085 {"long", s_aarch64_elf_cons, 4},
2086 {"xword", s_aarch64_elf_cons, 8},
2087 {"dword", s_aarch64_elf_cons, 8},
2093 /* Check whether STR points to a register name followed by a comma or the
2094 end of line; REG_TYPE indicates which register types are checked
2095 against. Return TRUE if STR is such a register name; otherwise return
2096 FALSE. The function does not intend to produce any diagnostics, but since
2097 the register parser aarch64_reg_parse, which is called by this function,
2098 does produce diagnostics, we call clear_error to clear any diagnostics
2099 that may be generated by aarch64_reg_parse.
2100 Also, the function returns FALSE directly if there is any user error
2101 present at the function entry. This prevents the existing diagnostics
2102 state from being spoiled.
2103 The function currently serves parse_constant_immediate and
2104 parse_big_immediate only. */
2106 reg_name_p (char *str, aarch64_reg_type reg_type)
2110 /* Prevent the diagnostics state from being spoiled. */
2114 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2116 /* Clear the parsing error that may be set by the reg parser. */
2119 if (reg == PARSE_FAIL)
2122 skip_whitespace (str);
2123 if (*str == ',' || is_end_of_line[(unsigned int) *str])
2129 /* Parser functions used exclusively in instruction operands. */
2131 /* Parse an immediate expression which may not be constant.
2133 To prevent the expression parser from pushing a register name
2134 into the symbol table as an undefined symbol, firstly a check is
2135 done to find out whether STR is a register of type REG_TYPE followed
2136 by a comma or the end of line. Return FALSE if STR is such a string. */
2139 parse_immediate_expression (char **str, expressionS *exp,
2140 aarch64_reg_type reg_type)
2142 if (reg_name_p (*str, reg_type))
2144 set_recoverable_error (_("immediate operand required"));
2148 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2150 if (exp->X_op == O_absent)
2152 set_fatal_syntax_error (_("missing immediate expression"));
2159 /* Constant immediate-value read function for use in insn parsing.
2160 STR points to the beginning of the immediate (with the optional
2161 leading #); *VAL receives the value. REG_TYPE says which register
2162 names should be treated as registers rather than as symbolic immediates.
2164 Return TRUE on success; otherwise return FALSE. */
2167 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2171 if (! parse_immediate_expression (str, &exp, reg_type))
2174 if (exp.X_op != O_constant)
2176 set_syntax_error (_("constant expression required"));
2180 *val = exp.X_add_number;
2185 encode_imm_float_bits (uint32_t imm)
2187 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2188 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2191 /* Return TRUE if the single-precision floating-point value encoded in IMM
2192 can be expressed in the AArch64 8-bit signed floating-point format with
2193 3-bit exponent and normalized 4 bits of precision; in other words, the
2194 floating-point value must be expressable as
2195 (+/-) n / 16 * power (2, r)
2196 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2199 aarch64_imm_float_p (uint32_t imm)
2201 /* If a single-precision floating-point value has the following bit
2202 pattern, it can be expressed in the AArch64 8-bit floating-point
2205 3 32222222 2221111111111
2206 1 09876543 21098765432109876543210
2207 n Eeeeeexx xxxx0000000000000000000
2209 where n, e and each x are either 0 or 1 independently, with
2214 /* Prepare the pattern for 'Eeeeee'. */
2215 if (((imm >> 30) & 0x1) == 0)
2216 pattern = 0x3e000000;
2218 pattern = 0x40000000;
2220 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2221 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2224 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2225 as an IEEE float without any loss of precision. Store the value in
2229 can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
2231 /* If a double-precision floating-point value has the following bit
2232 pattern, it can be expressed in a float:
2234 6 66655555555 5544 44444444 33333333 33222222 22221111 111111
2235 3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
2236 n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000
2238 -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
2239 if Eeee_eeee != 1111_1111
2241 where n, e, s and S are either 0 or 1 independently and where ~ is the
2245 uint32_t high32 = imm >> 32;
2246 uint32_t low32 = imm;
2248 /* Lower 29 bits need to be 0s. */
2249 if ((imm & 0x1fffffff) != 0)
2252 /* Prepare the pattern for 'Eeeeeeeee'. */
2253 if (((high32 >> 30) & 0x1) == 0)
2254 pattern = 0x38000000;
2256 pattern = 0x40000000;
2259 if ((high32 & 0x78000000) != pattern)
2262 /* Check Eeee_eeee != 1111_1111. */
2263 if ((high32 & 0x7ff00000) == 0x47f00000)
2266 *fpword = ((high32 & 0xc0000000) /* 1 n bit and 1 E bit. */
2267 | ((high32 << 3) & 0x3ffffff8) /* 7 e and 20 s bits. */
2268 | (low32 >> 29)); /* 3 S bits. */
2272 /* Return true if we should treat OPERAND as a double-precision
2273 floating-point operand rather than a single-precision one. */
2275 double_precision_operand_p (const aarch64_opnd_info *operand)
2277 /* Check for unsuffixed SVE registers, which are allowed
2278 for LDR and STR but not in instructions that require an
2279 immediate. We get better error messages if we arbitrarily
2280 pick one size, parse the immediate normally, and then
2281 report the match failure in the normal way. */
2282 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2283 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2286 /* Parse a floating-point immediate. Return TRUE on success and return the
2287 value in *IMMED in the format of IEEE754 single-precision encoding.
2288 *CCP points to the start of the string; DP_P is TRUE when the immediate
2289 is expected to be in double-precision (N.B. this only matters when
2290 hexadecimal representation is involved). REG_TYPE says which register
2291 names should be treated as registers rather than as symbolic immediates.
2293 This routine accepts any IEEE float; it is up to the callers to reject
2297 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
2298 aarch64_reg_type reg_type)
2302 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2304 unsigned fpword = 0;
2305 bfd_boolean hex_p = FALSE;
2307 skip_past_char (&str, '#');
2310 skip_whitespace (fpnum);
2312 if (strncmp (fpnum, "0x", 2) == 0)
2314 /* Support the hexadecimal representation of the IEEE754 encoding.
2315 Double-precision is expected when DP_P is TRUE, otherwise the
2316 representation should be in single-precision. */
2317 if (! parse_constant_immediate (&str, &val, reg_type))
2322 if (!can_convert_double_to_float (val, &fpword))
2325 else if ((uint64_t) val > 0xffffffff)
2332 else if (reg_name_p (str, reg_type))
2334 set_recoverable_error (_("immediate operand required"));
2342 if ((str = atof_ieee (str, 's', words)) == NULL)
2345 /* Our FP word must be 32 bits (single-precision FP). */
2346 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2348 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2358 set_fatal_syntax_error (_("invalid floating-point constant"));
2362 /* Less-generic immediate-value read function with the possibility of loading
2363 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2366 To prevent the expression parser from pushing a register name into the
2367 symbol table as an undefined symbol, a check is firstly done to find
2368 out whether STR is a register of type REG_TYPE followed by a comma or
2369 the end of line. Return FALSE if STR is such a register. */
2372 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2376 if (reg_name_p (ptr, reg_type))
2378 set_syntax_error (_("immediate operand required"));
2382 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2384 if (inst.reloc.exp.X_op == O_constant)
2385 *imm = inst.reloc.exp.X_add_number;
2392 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2393 if NEED_LIBOPCODES is non-zero, the fixup will need
2394 assistance from the libopcodes. */
2397 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2398 const aarch64_opnd_info *operand,
2399 int need_libopcodes_p)
2401 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2402 reloc->opnd = operand->type;
2403 if (need_libopcodes_p)
2404 reloc->need_libopcodes_p = 1;
2407 /* Return TRUE if the instruction needs to be fixed up later internally by
2408 the GAS; otherwise return FALSE. */
2410 static inline bfd_boolean
2411 aarch64_gas_internal_fixup_p (void)
2413 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2416 /* Assign the immediate value to the relevant field in *OPERAND if
2417 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2418 needs an internal fixup in a later stage.
2419 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2420 IMM.VALUE that may get assigned with the constant. */
2422 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2423 aarch64_opnd_info *operand,
2425 int need_libopcodes_p,
2428 if (reloc->exp.X_op == O_constant)
2431 operand->addr.offset.imm = reloc->exp.X_add_number;
2433 operand->imm.value = reloc->exp.X_add_number;
2434 reloc->type = BFD_RELOC_UNUSED;
2438 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2439 /* Tell libopcodes to ignore this operand or not. This is helpful
2440 when one of the operands needs to be fixed up later but we need
2441 libopcodes to check the other operands. */
2442 operand->skip = skip_p;
2446 /* Relocation modifiers. Each entry in the table contains the textual
2447 name for the relocation which may be placed before a symbol used as
2448 a load/store offset, or add immediate. It must be surrounded by a
2449 leading and trailing colon, for example:
2451 ldr x0, [x1, #:rello:varsym]
2452 add x0, x1, #:rello:varsym */
2454 struct reloc_table_entry
2458 bfd_reloc_code_real_type adr_type;
2459 bfd_reloc_code_real_type adrp_type;
2460 bfd_reloc_code_real_type movw_type;
2461 bfd_reloc_code_real_type add_type;
2462 bfd_reloc_code_real_type ldst_type;
2463 bfd_reloc_code_real_type ld_literal_type;
2466 static struct reloc_table_entry reloc_table[] = {
2467 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2472 BFD_RELOC_AARCH64_ADD_LO12,
2473 BFD_RELOC_AARCH64_LDST_LO12,
2476 /* Higher 21 bits of pc-relative page offset: ADRP */
2479 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2485 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2488 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2494 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2498 BFD_RELOC_AARCH64_MOVW_G0,
2503 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2507 BFD_RELOC_AARCH64_MOVW_G0_S,
2512 /* Less significant bits 0-15 of address/value: MOVK, no check */
2516 BFD_RELOC_AARCH64_MOVW_G0_NC,
2521 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2525 BFD_RELOC_AARCH64_MOVW_G1,
2530 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2534 BFD_RELOC_AARCH64_MOVW_G1_S,
2539 /* Less significant bits 16-31 of address/value: MOVK, no check */
2543 BFD_RELOC_AARCH64_MOVW_G1_NC,
2548 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2552 BFD_RELOC_AARCH64_MOVW_G2,
2557 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2561 BFD_RELOC_AARCH64_MOVW_G2_S,
2566 /* Less significant bits 32-47 of address/value: MOVK, no check */
2570 BFD_RELOC_AARCH64_MOVW_G2_NC,
2575 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2579 BFD_RELOC_AARCH64_MOVW_G3,
2584 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2588 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2593 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2597 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2602 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2606 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2611 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2615 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2620 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2624 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2629 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2633 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2638 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2642 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2647 /* Get to the page containing GOT entry for a symbol. */
2650 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2654 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2656 /* 12 bit offset into the page containing GOT entry for that symbol. */
2662 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2665 /* 0-15 bits of address/value: MOVk, no check. */
2669 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2674 /* Most significant bits 16-31 of address/value: MOVZ. */
2678 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2683 /* 15 bit offset into the page containing GOT entry for that symbol. */
2689 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2692 /* Get to the page containing GOT TLS entry for a symbol */
2693 {"gottprel_g0_nc", 0,
2696 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2701 /* Get to the page containing GOT TLS entry for a symbol */
2705 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2710 /* Get to the page containing GOT TLS entry for a symbol */
2712 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2713 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2719 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2724 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2728 /* Lower 16 bits address/value: MOVk. */
2732 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2737 /* Most significant bits 16-31 of address/value: MOVZ. */
2741 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2746 /* Get to the page containing GOT TLS entry for a symbol */
2748 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2749 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2753 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2755 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2760 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2761 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2764 /* Get to the page containing GOT TLS entry for a symbol.
2765 The same as GD, we allocate two consecutive GOT slots
2766 for module index and module offset, the only difference
2767 with GD is the module offset should be initialized to
2768 zero without any outstanding runtime relocation. */
2770 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2771 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2777 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2778 {"tlsldm_lo12_nc", 0,
2782 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2786 /* 12 bit offset into the module TLS base address. */
2791 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2792 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2795 /* Same as dtprel_lo12, no overflow check. */
2796 {"dtprel_lo12_nc", 0,
2800 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2801 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2804 /* bits[23:12] of offset to the module TLS base address. */
2809 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2813 /* bits[15:0] of offset to the module TLS base address. */
2817 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2822 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2826 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2831 /* bits[31:16] of offset to the module TLS base address. */
2835 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2840 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2844 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2849 /* bits[47:32] of offset to the module TLS base address. */
2853 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2858 /* Lower 16 bit offset into GOT entry for a symbol */
2859 {"tlsdesc_off_g0_nc", 0,
2862 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2867 /* Higher 16 bit offset into GOT entry for a symbol */
2868 {"tlsdesc_off_g1", 0,
2871 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2876 /* Get to the page containing GOT TLS entry for a symbol */
2879 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2883 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2885 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2886 {"gottprel_lo12", 0,
2891 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2894 /* Get tp offset for a symbol. */
2899 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2903 /* Get tp offset for a symbol. */
2908 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2909 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2912 /* Get tp offset for a symbol. */
2917 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2921 /* Get tp offset for a symbol. */
2922 {"tprel_lo12_nc", 0,
2926 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2927 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2930 /* Most significant bits 32-47 of address/value: MOVZ. */
2934 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2939 /* Most significant bits 16-31 of address/value: MOVZ. */
2943 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2948 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2952 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2957 /* Most significant bits 0-15 of address/value: MOVZ. */
2961 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2966 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2970 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2975 /* 15bit offset from got entry to base address of GOT table. */
2981 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2984 /* 14bit offset from got entry to base address of GOT table. */
2990 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2994 /* Given the address of a pointer pointing to the textual name of a
2995 relocation as may appear in assembler source, attempt to find its
2996 details in reloc_table. The pointer will be updated to the character
2997 after the trailing colon. On failure, NULL will be returned;
2998 otherwise return the reloc_table_entry. */
3000 static struct reloc_table_entry *
3001 find_reloc_table_entry (char **str)
3004 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3006 int length = strlen (reloc_table[i].name);
3008 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3009 && (*str)[length] == ':')
3011 *str += (length + 1);
3012 return &reloc_table[i];
3019 /* Mode argument to parse_shift and parser_shifter_operand. */
3020 enum parse_shift_mode
3022 SHIFTED_NONE, /* no shifter allowed */
3023 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
3025 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
3027 SHIFTED_LSL, /* bare "lsl #n" */
3028 SHIFTED_MUL, /* bare "mul #n" */
3029 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
3030 SHIFTED_MUL_VL, /* "mul vl" */
3031 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
3034 /* Parse a <shift> operator on an AArch64 data processing instruction.
3035 Return TRUE on success; otherwise return FALSE. */
3037 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3039 const struct aarch64_name_value_pair *shift_op;
3040 enum aarch64_modifier_kind kind;
3046 for (p = *str; ISALPHA (*p); p++)
3051 set_syntax_error (_("shift expression expected"));
3055 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
3057 if (shift_op == NULL)
3059 set_syntax_error (_("shift operator expected"));
3063 kind = aarch64_get_operand_modifier (shift_op);
3065 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3067 set_syntax_error (_("invalid use of 'MSL'"));
3071 if (kind == AARCH64_MOD_MUL
3072 && mode != SHIFTED_MUL
3073 && mode != SHIFTED_MUL_VL)
3075 set_syntax_error (_("invalid use of 'MUL'"));
3081 case SHIFTED_LOGIC_IMM:
3082 if (aarch64_extend_operator_p (kind))
3084 set_syntax_error (_("extending shift is not permitted"));
3089 case SHIFTED_ARITH_IMM:
3090 if (kind == AARCH64_MOD_ROR)
3092 set_syntax_error (_("'ROR' shift is not permitted"));
3098 if (kind != AARCH64_MOD_LSL)
3100 set_syntax_error (_("only 'LSL' shift is permitted"));
3106 if (kind != AARCH64_MOD_MUL)
3108 set_syntax_error (_("only 'MUL' is permitted"));
3113 case SHIFTED_MUL_VL:
3114 /* "MUL VL" consists of two separate tokens. Require the first
3115 token to be "MUL" and look for a following "VL". */
3116 if (kind == AARCH64_MOD_MUL)
3118 skip_whitespace (p);
3119 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3122 kind = AARCH64_MOD_MUL_VL;
3126 set_syntax_error (_("only 'MUL VL' is permitted"));
3129 case SHIFTED_REG_OFFSET:
3130 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3131 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3133 set_fatal_syntax_error
3134 (_("invalid shift for the register offset addressing mode"));
3139 case SHIFTED_LSL_MSL:
3140 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3142 set_syntax_error (_("invalid shift operator"));
3151 /* Whitespace can appear here if the next thing is a bare digit. */
3152 skip_whitespace (p);
3154 /* Parse shift amount. */
3156 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3157 exp.X_op = O_absent;
3160 if (is_immediate_prefix (*p))
3165 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
3167 if (kind == AARCH64_MOD_MUL_VL)
3168 /* For consistency, give MUL VL the same shift amount as an implicit
3170 operand->shifter.amount = 1;
3171 else if (exp.X_op == O_absent)
3173 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3175 set_syntax_error (_("missing shift amount"));
3178 operand->shifter.amount = 0;
3180 else if (exp.X_op != O_constant)
3182 set_syntax_error (_("constant shift amount required"));
3185 /* For parsing purposes, MUL #n has no inherent range. The range
3186 depends on the operand and will be checked by operand-specific
3188 else if (kind != AARCH64_MOD_MUL
3189 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3191 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3196 operand->shifter.amount = exp.X_add_number;
3197 operand->shifter.amount_present = 1;
3200 operand->shifter.operator_present = 1;
3201 operand->shifter.kind = kind;
3207 /* Parse a <shifter_operand> for a data processing instruction:
3210 #<immediate>, LSL #imm
3212 Validation of immediate operands is deferred to md_apply_fix.
3214 Return TRUE on success; otherwise return FALSE. */
3217 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3218 enum parse_shift_mode mode)
3222 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3227 /* Accept an immediate expression. */
3228 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3231 /* Accept optional LSL for arithmetic immediate values. */
3232 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3233 if (! parse_shift (&p, operand, SHIFTED_LSL))
3236 /* Not accept any shifter for logical immediate values. */
3237 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3238 && parse_shift (&p, operand, mode))
3240 set_syntax_error (_("unexpected shift operator"));
3248 /* Parse a <shifter_operand> for a data processing instruction:
3253 #<immediate>, LSL #imm
3255 where <shift> is handled by parse_shift above, and the last two
3256 cases are handled by the function above.
3258 Validation of immediate operands is deferred to md_apply_fix.
3260 Return TRUE on success; otherwise return FALSE. */
3263 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3264 enum parse_shift_mode mode)
3266 const reg_entry *reg;
3267 aarch64_opnd_qualifier_t qualifier;
3268 enum aarch64_operand_class opd_class
3269 = aarch64_get_operand_class (operand->type);
3271 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3274 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3276 set_syntax_error (_("unexpected register in the immediate operand"));
3280 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3282 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3286 operand->reg.regno = reg->number;
3287 operand->qualifier = qualifier;
3289 /* Accept optional shift operation on register. */
3290 if (! skip_past_comma (str))
3293 if (! parse_shift (str, operand, mode))
3298 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3301 (_("integer register expected in the extended/shifted operand "
3306 /* We have a shifted immediate variable. */
3307 return parse_shifter_operand_imm (str, operand, mode);
3310 /* Return TRUE on success; return FALSE otherwise. */
3313 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3314 enum parse_shift_mode mode)
3318 /* Determine if we have the sequence of characters #: or just :
3319 coming next. If we do, then we check for a :rello: relocation
3320 modifier. If we don't, punt the whole lot to
3321 parse_shifter_operand. */
3323 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3325 struct reloc_table_entry *entry;
3333 /* Try to parse a relocation. Anything else is an error. */
3334 if (!(entry = find_reloc_table_entry (str)))
3336 set_syntax_error (_("unknown relocation modifier"));
3340 if (entry->add_type == 0)
3343 (_("this relocation modifier is not allowed on this instruction"));
3347 /* Save str before we decompose it. */
3350 /* Next, we parse the expression. */
3351 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3354 /* Record the relocation type (use the ADD variant here). */
3355 inst.reloc.type = entry->add_type;
3356 inst.reloc.pc_rel = entry->pc_rel;
3358 /* If str is empty, we've reached the end, stop here. */
3362 /* Otherwise, we have a shifted reloc modifier, so rewind to
3363 recover the variable name and continue parsing for the shifter. */
3365 return parse_shifter_operand_imm (str, operand, mode);
3368 return parse_shifter_operand (str, operand, mode);
3371 /* Parse all forms of an address expression. Information is written
3372 to *OPERAND and/or inst.reloc.
3374 The A64 instruction set has the following addressing modes:
3377 [base] // in SIMD ld/st structure
3378 [base{,#0}] // in ld/st exclusive
3380 [base,Xm{,LSL #imm}]
3381 [base,Xm,SXTX {#imm}]
3382 [base,Wm,(S|U)XTW {#imm}]
3387 [base],Xm // in SIMD ld/st structure
3388 PC-relative (literal)
3392 [base,Zm.D{,LSL #imm}]
3393 [base,Zm.S,(S|U)XTW {#imm}]
3394 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3398 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3399 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3400 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3402 (As a convenience, the notation "=immediate" is permitted in conjunction
3403 with the pc-relative literal load instructions to automatically place an
3404 immediate value or symbolic address in a nearby literal pool and generate
3405 a hidden label which references it.)
3407 Upon a successful parsing, the address structure in *OPERAND will be
3408 filled in the following way:
3410 .base_regno = <base>
3411 .offset.is_reg // 1 if the offset is a register
3413 .offset.regno = <Rm>
3415 For different addressing modes defined in the A64 ISA:
3418 .pcrel=0; .preind=1; .postind=0; .writeback=0
3420 .pcrel=0; .preind=1; .postind=0; .writeback=1
3422 .pcrel=0; .preind=0; .postind=1; .writeback=1
3423 PC-relative (literal)
3424 .pcrel=1; .preind=1; .postind=0; .writeback=0
3426 The shift/extension information, if any, will be stored in .shifter.
3427 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3428 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3429 corresponding register.
3431 BASE_TYPE says which types of base register should be accepted and
3432 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3433 is the type of shifter that is allowed for immediate offsets,
3434 or SHIFTED_NONE if none.
3436 In all other respects, it is the caller's responsibility to check
3437 for addressing modes not supported by the instruction, and to set
3441 parse_address_main (char **str, aarch64_opnd_info *operand,
3442 aarch64_opnd_qualifier_t *base_qualifier,
3443 aarch64_opnd_qualifier_t *offset_qualifier,
3444 aarch64_reg_type base_type, aarch64_reg_type offset_type,
3445 enum parse_shift_mode imm_shift_mode)
3448 const reg_entry *reg;
3449 expressionS *exp = &inst.reloc.exp;
3451 *base_qualifier = AARCH64_OPND_QLF_NIL;
3452 *offset_qualifier = AARCH64_OPND_QLF_NIL;
3453 if (! skip_past_char (&p, '['))
3455 /* =immediate or label. */
3456 operand->addr.pcrel = 1;
3457 operand->addr.preind = 1;
3459 /* #:<reloc_op>:<symbol> */
3460 skip_past_char (&p, '#');
3461 if (skip_past_char (&p, ':'))
3463 bfd_reloc_code_real_type ty;
3464 struct reloc_table_entry *entry;
3466 /* Try to parse a relocation modifier. Anything else is
3468 entry = find_reloc_table_entry (&p);
3471 set_syntax_error (_("unknown relocation modifier"));
3475 switch (operand->type)
3477 case AARCH64_OPND_ADDR_PCREL21:
3479 ty = entry->adr_type;
3483 ty = entry->ld_literal_type;
3490 (_("this relocation modifier is not allowed on this "
3496 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3498 set_syntax_error (_("invalid relocation expression"));
3502 /* #:<reloc_op>:<expr> */
3503 /* Record the relocation type. */
3504 inst.reloc.type = ty;
3505 inst.reloc.pc_rel = entry->pc_rel;
3510 if (skip_past_char (&p, '='))
3511 /* =immediate; need to generate the literal in the literal pool. */
3512 inst.gen_lit_pool = 1;
3514 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3516 set_syntax_error (_("invalid address"));
3527 reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
3528 if (!reg || !aarch64_check_reg_type (reg, base_type))
3530 set_syntax_error (_(get_reg_expected_msg (base_type)));
3533 operand->addr.base_regno = reg->number;
3536 if (skip_past_comma (&p))
3539 operand->addr.preind = 1;
3541 reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
3544 if (!aarch64_check_reg_type (reg, offset_type))
3546 set_syntax_error (_(get_reg_expected_msg (offset_type)));
3551 operand->addr.offset.regno = reg->number;
3552 operand->addr.offset.is_reg = 1;
3553 /* Shifted index. */
3554 if (skip_past_comma (&p))
3557 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3558 /* Use the diagnostics set in parse_shift, so not set new
3559 error message here. */
3563 [base,Xm] # For vector plus scalar SVE2 indexing.
3564 [base,Xm{,LSL #imm}]
3565 [base,Xm,SXTX {#imm}]
3566 [base,Wm,(S|U)XTW {#imm}] */
3567 if (operand->shifter.kind == AARCH64_MOD_NONE
3568 || operand->shifter.kind == AARCH64_MOD_LSL
3569 || operand->shifter.kind == AARCH64_MOD_SXTX)
3571 if (*offset_qualifier == AARCH64_OPND_QLF_W)
3573 set_syntax_error (_("invalid use of 32-bit register offset"));
3576 if (aarch64_get_qualifier_esize (*base_qualifier)
3577 != aarch64_get_qualifier_esize (*offset_qualifier)
3578 && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
3579 || *base_qualifier != AARCH64_OPND_QLF_S_S
3580 || *offset_qualifier != AARCH64_OPND_QLF_X))
3582 set_syntax_error (_("offset has different size from base"));
3586 else if (*offset_qualifier == AARCH64_OPND_QLF_X)
3588 set_syntax_error (_("invalid use of 64-bit register offset"));
3594 /* [Xn,#:<reloc_op>:<symbol> */
3595 skip_past_char (&p, '#');
3596 if (skip_past_char (&p, ':'))
3598 struct reloc_table_entry *entry;
3600 /* Try to parse a relocation modifier. Anything else is
3602 if (!(entry = find_reloc_table_entry (&p)))
3604 set_syntax_error (_("unknown relocation modifier"));
3608 if (entry->ldst_type == 0)
3611 (_("this relocation modifier is not allowed on this "
3616 /* [Xn,#:<reloc_op>: */
3617 /* We now have the group relocation table entry corresponding to
3618 the name in the assembler source. Next, we parse the
3620 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3622 set_syntax_error (_("invalid relocation expression"));
3626 /* [Xn,#:<reloc_op>:<expr> */
3627 /* Record the load/store relocation type. */
3628 inst.reloc.type = entry->ldst_type;
3629 inst.reloc.pc_rel = entry->pc_rel;
3633 if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3635 set_syntax_error (_("invalid expression in the address"));
3639 if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
3640 /* [Xn,<expr>,<shifter> */
3641 if (! parse_shift (&p, operand, imm_shift_mode))
3647 if (! skip_past_char (&p, ']'))
3649 set_syntax_error (_("']' expected"));
3653 if (skip_past_char (&p, '!'))
3655 if (operand->addr.preind && operand->addr.offset.is_reg)
3657 set_syntax_error (_("register offset not allowed in pre-indexed "
3658 "addressing mode"));
3662 operand->addr.writeback = 1;
3664 else if (skip_past_comma (&p))
3667 operand->addr.postind = 1;
3668 operand->addr.writeback = 1;
3670 if (operand->addr.preind)
3672 set_syntax_error (_("cannot combine pre- and post-indexing"));
3676 reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
3680 if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
3682 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3686 operand->addr.offset.regno = reg->number;
3687 operand->addr.offset.is_reg = 1;
3689 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3692 set_syntax_error (_("invalid expression in the address"));
3697 /* If at this point neither .preind nor .postind is set, we have a
3698 bare [Rn]{!}; reject [Rn]! accept [Rn] as a shorthand for [Rn,#0].
3699 For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
3701 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3703 if (operand->addr.writeback)
3706 set_syntax_error (_("missing offset in the pre-indexed address"));
3710 operand->addr.preind = 1;
3711 if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
3713 operand->addr.offset.is_reg = 1;
3714 operand->addr.offset.regno = REG_ZR;
3715 *offset_qualifier = AARCH64_OPND_QLF_X;
3719 inst.reloc.exp.X_op = O_constant;
3720 inst.reloc.exp.X_add_number = 0;
3728 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3731 parse_address (char **str, aarch64_opnd_info *operand)
3733 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3734 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3735 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3738 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3739 The arguments have the same meaning as for parse_address_main.
3740 Return TRUE on success. */
3742 parse_sve_address (char **str, aarch64_opnd_info *operand,
3743 aarch64_opnd_qualifier_t *base_qualifier,
3744 aarch64_opnd_qualifier_t *offset_qualifier)
3746 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3747 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3751 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3752 Return TRUE on success; otherwise return FALSE. */
3754 parse_half (char **str, int *internal_fixup_p)
3758 skip_past_char (&p, '#');
3760 gas_assert (internal_fixup_p);
3761 *internal_fixup_p = 0;
3765 struct reloc_table_entry *entry;
3767 /* Try to parse a relocation. Anything else is an error. */
3769 if (!(entry = find_reloc_table_entry (&p)))
3771 set_syntax_error (_("unknown relocation modifier"));
3775 if (entry->movw_type == 0)
3778 (_("this relocation modifier is not allowed on this instruction"));
3782 inst.reloc.type = entry->movw_type;
3785 *internal_fixup_p = 1;
3787 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3794 /* Parse an operand for an ADRP instruction:
3796 Return TRUE on success; otherwise return FALSE. */
3799 parse_adrp (char **str)
3806 struct reloc_table_entry *entry;
3808 /* Try to parse a relocation. Anything else is an error. */
3810 if (!(entry = find_reloc_table_entry (&p)))
3812 set_syntax_error (_("unknown relocation modifier"));
3816 if (entry->adrp_type == 0)
3819 (_("this relocation modifier is not allowed on this instruction"));
3823 inst.reloc.type = entry->adrp_type;
3826 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3828 inst.reloc.pc_rel = 1;
3830 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3837 /* Miscellaneous. */
3839 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3840 of SIZE tokens in which index I gives the token for field value I,
3841 or is null if field value I is invalid. REG_TYPE says which register
3842 names should be treated as registers rather than as symbolic immediates.
3844 Return true on success, moving *STR past the operand and storing the
3845 field value in *VAL. */
3848 parse_enum_string (char **str, int64_t *val, const char *const *array,
3849 size_t size, aarch64_reg_type reg_type)
3855 /* Match C-like tokens. */
3857 while (ISALNUM (*q))
3860 for (i = 0; i < size; ++i)
3862 && strncasecmp (array[i], p, q - p) == 0
3863 && array[i][q - p] == 0)
3870 if (!parse_immediate_expression (&p, &exp, reg_type))
3873 if (exp.X_op == O_constant
3874 && (uint64_t) exp.X_add_number < size)
3876 *val = exp.X_add_number;
3881 /* Use the default error for this operand. */
3885 /* Parse an option for a preload instruction. Returns the encoding for the
3886 option, or PARSE_FAIL. */
3889 parse_pldop (char **str)
3892 const struct aarch64_name_value_pair *o;
3895 while (ISALNUM (*q))
3898 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3906 /* Parse an option for a barrier instruction. Returns the encoding for the
3907 option, or PARSE_FAIL. */
3910 parse_barrier (char **str)
3913 const asm_barrier_opt *o;
3916 while (ISALPHA (*q))
3919 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3927 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3928 return 0 if successful. Otherwise return PARSE_FAIL. */
3931 parse_barrier_psb (char **str,
3932 const struct aarch64_name_value_pair ** hint_opt)
3935 const struct aarch64_name_value_pair *o;
3938 while (ISALPHA (*q))
3941 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3944 set_fatal_syntax_error
3945 ( _("unknown or missing option to PSB"));
3949 if (o->value != 0x11)
3951 /* PSB only accepts option name 'CSYNC'. */
3953 (_("the specified option is not accepted for PSB"));
3962 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
3963 return 0 if successful. Otherwise return PARSE_FAIL. */
3966 parse_bti_operand (char **str,
3967 const struct aarch64_name_value_pair ** hint_opt)
3970 const struct aarch64_name_value_pair *o;
3973 while (ISALPHA (*q))
3976 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3979 set_fatal_syntax_error
3980 ( _("unknown option to BTI"));
3986 /* Valid BTI operands. */
3994 (_("unknown option to BTI"));
4003 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4004 Returns the encoding for the option, or PARSE_FAIL.
4006 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4007 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4009 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4010 field, otherwise as a system register.
4014 parse_sys_reg (char **str, struct hash_control *sys_regs,
4015 int imple_defined_p, int pstatefield_p,
4020 const aarch64_sys_reg *o;
4024 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4026 *p++ = TOLOWER (*q);
4028 /* Assert that BUF be large enough. */
4029 gas_assert (p - buf == q - *str);
4031 o = hash_find (sys_regs, buf);
4034 if (!imple_defined_p)
4038 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
4039 unsigned int op0, op1, cn, cm, op2;
4041 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
4044 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
4046 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
4053 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
4054 as_bad (_("selected processor does not support PSTATE field "
4056 if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
4057 as_bad (_("selected processor does not support system register "
4059 if (aarch64_sys_reg_deprecated_p (o))
4060 as_warn (_("system register name '%s' is deprecated and may be "
4061 "removed in a future release"), buf);
4071 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4072 for the option, or NULL. */
4074 static const aarch64_sys_ins_reg *
4075 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
4079 const aarch64_sys_ins_reg *o;
4082 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4084 *p++ = TOLOWER (*q);
4087 o = hash_find (sys_ins_regs, buf);
4091 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
4092 as_bad (_("selected processor does not support system register "
4099 #define po_char_or_fail(chr) do { \
4100 if (! skip_past_char (&str, chr)) \
4104 #define po_reg_or_fail(regtype) do { \
4105 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
4106 if (val == PARSE_FAIL) \
4108 set_default_error (); \
4113 #define po_int_reg_or_fail(reg_type) do { \
4114 reg = aarch64_reg_parse_32_64 (&str, &qualifier); \
4115 if (!reg || !aarch64_check_reg_type (reg, reg_type)) \
4117 set_default_error (); \
4120 info->reg.regno = reg->number; \
4121 info->qualifier = qualifier; \
4124 #define po_imm_nc_or_fail() do { \
4125 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
4129 #define po_imm_or_fail(min, max) do { \
4130 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
4132 if (val < min || val > max) \
4134 set_fatal_syntax_error (_("immediate value out of range "\
4135 #min " to "#max)); \
4140 #define po_enum_or_fail(array) do { \
4141 if (!parse_enum_string (&str, &val, array, \
4142 ARRAY_SIZE (array), imm_reg_type)) \
4146 #define po_misc_or_fail(expr) do { \
4151 /* encode the 12-bit imm field of Add/sub immediate */
4152 static inline uint32_t
4153 encode_addsub_imm (uint32_t imm)
4158 /* encode the shift amount field of Add/sub immediate */
4159 static inline uint32_t
4160 encode_addsub_imm_shift_amount (uint32_t cnt)
4166 /* encode the imm field of Adr instruction */
4167 static inline uint32_t
4168 encode_adr_imm (uint32_t imm)
4170 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
4171 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
4174 /* encode the immediate field of Move wide immediate */
4175 static inline uint32_t
4176 encode_movw_imm (uint32_t imm)
4181 /* encode the 26-bit offset of unconditional branch */
4182 static inline uint32_t
4183 encode_branch_ofs_26 (uint32_t ofs)
4185 return ofs & ((1 << 26) - 1);
4188 /* encode the 19-bit offset of conditional branch and compare & branch */
4189 static inline uint32_t
4190 encode_cond_branch_ofs_19 (uint32_t ofs)
4192 return (ofs & ((1 << 19) - 1)) << 5;
4195 /* encode the 19-bit offset of ld literal */
4196 static inline uint32_t
4197 encode_ld_lit_ofs_19 (uint32_t ofs)
4199 return (ofs & ((1 << 19) - 1)) << 5;
4202 /* Encode the 14-bit offset of test & branch. */
4203 static inline uint32_t
4204 encode_tst_branch_ofs_14 (uint32_t ofs)
4206 return (ofs & ((1 << 14) - 1)) << 5;
4209 /* Encode the 16-bit imm field of svc/hvc/smc. */
4210 static inline uint32_t
4211 encode_svc_imm (uint32_t imm)
4216 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
4217 static inline uint32_t
4218 reencode_addsub_switch_add_sub (uint32_t opcode)
4220 return opcode ^ (1 << 30);
4223 static inline uint32_t
4224 reencode_movzn_to_movz (uint32_t opcode)
4226 return opcode | (1 << 30);
4229 static inline uint32_t
4230 reencode_movzn_to_movn (uint32_t opcode)
4232 return opcode & ~(1 << 30);
4235 /* Overall per-instruction processing. */
4237 /* We need to be able to fix up arbitrary expressions in some statements.
4238 This is so that we can handle symbols that are an arbitrary distance from
4239 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4240 which returns part of an address in a form which will be valid for
4241 a data instruction. We do this by pushing the expression into a symbol
4242 in the expr_section, and creating a fix for that. */
4245 fix_new_aarch64 (fragS * frag,
4247 short int size, expressionS * exp, int pc_rel, int reloc)
4257 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4261 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4268 /* Diagnostics on operands errors. */
4270 /* By default, output verbose error message.
4271 Disable the verbose error message by -mno-verbose-error. */
4272 static int verbose_error_p = 1;
4274 #ifdef DEBUG_AARCH64
4275 /* N.B. this is only for the purpose of debugging. */
4276 const char* operand_mismatch_kind_names[] =
4279 "AARCH64_OPDE_RECOVERABLE",
4280 "AARCH64_OPDE_SYNTAX_ERROR",
4281 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
4282 "AARCH64_OPDE_INVALID_VARIANT",
4283 "AARCH64_OPDE_OUT_OF_RANGE",
4284 "AARCH64_OPDE_UNALIGNED",
4285 "AARCH64_OPDE_REG_LIST",
4286 "AARCH64_OPDE_OTHER_ERROR",
4288 #endif /* DEBUG_AARCH64 */
4290 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
4292 When multiple errors of different kinds are found in the same assembly
4293 line, only the error of the highest severity will be picked up for
4294 issuing the diagnostics. */
4296 static inline bfd_boolean
4297 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
4298 enum aarch64_operand_error_kind rhs)
4300 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
4301 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
4302 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
4303 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
4304 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
4305 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
4306 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
4307 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
4311 /* Helper routine to get the mnemonic name from the assembly instruction
4312 line; should only be called for the diagnosis purpose, as there is
4313 string copy operation involved, which may affect the runtime
4314 performance if used in elsewhere. */
4317 get_mnemonic_name (const char *str)
4319 static char mnemonic[32];
4322 /* Get the first 15 bytes and assume that the full name is included. */
4323 strncpy (mnemonic, str, 31);
4324 mnemonic[31] = '\0';
4326 /* Scan up to the end of the mnemonic, which must end in white space,
4327 '.', or end of string. */
4328 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
4333 /* Append '...' to the truncated long name. */
4334 if (ptr - mnemonic == 31)
4335 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
4341 reset_aarch64_instruction (aarch64_instruction *instruction)
4343 memset (instruction, '\0', sizeof (aarch64_instruction));
4344 instruction->reloc.type = BFD_RELOC_UNUSED;
4347 /* Data structures storing one user error in the assembly code related to
4350 struct operand_error_record
4352 const aarch64_opcode *opcode;
4353 aarch64_operand_error detail;
4354 struct operand_error_record *next;
4357 typedef struct operand_error_record operand_error_record;
4359 struct operand_errors
4361 operand_error_record *head;
4362 operand_error_record *tail;
4365 typedef struct operand_errors operand_errors;
4367 /* Top-level data structure reporting user errors for the current line of
4369 The way md_assemble works is that all opcodes sharing the same mnemonic
4370 name are iterated to find a match to the assembly line. In this data
4371 structure, each of the such opcodes will have one operand_error_record
4372 allocated and inserted. In other words, excessive errors related with
4373 a single opcode are disregarded. */
4374 operand_errors operand_error_report;
4376 /* Free record nodes. */
4377 static operand_error_record *free_opnd_error_record_nodes = NULL;
4379 /* Initialize the data structure that stores the operand mismatch
4380 information on assembling one line of the assembly code. */
4382 init_operand_error_report (void)
4384 if (operand_error_report.head != NULL)
4386 gas_assert (operand_error_report.tail != NULL);
4387 operand_error_report.tail->next = free_opnd_error_record_nodes;
4388 free_opnd_error_record_nodes = operand_error_report.head;
4389 operand_error_report.head = NULL;
4390 operand_error_report.tail = NULL;
4393 gas_assert (operand_error_report.tail == NULL);
4396 /* Return TRUE if some operand error has been recorded during the
4397 parsing of the current assembly line using the opcode *OPCODE;
4398 otherwise return FALSE. */
4399 static inline bfd_boolean
4400 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4402 operand_error_record *record = operand_error_report.head;
4403 return record && record->opcode == opcode;
4406 /* Add the error record *NEW_RECORD to operand_error_report. The record's
4407 OPCODE field is initialized with OPCODE.
4408 N.B. only one record for each opcode, i.e. the maximum of one error is
4409 recorded for each instruction template. */
4412 add_operand_error_record (const operand_error_record* new_record)
4414 const aarch64_opcode *opcode = new_record->opcode;
4415 operand_error_record* record = operand_error_report.head;
4417 /* The record may have been created for this opcode. If not, we need
4419 if (! opcode_has_operand_error_p (opcode))
4421 /* Get one empty record. */
4422 if (free_opnd_error_record_nodes == NULL)
4424 record = XNEW (operand_error_record);
4428 record = free_opnd_error_record_nodes;
4429 free_opnd_error_record_nodes = record->next;
4431 record->opcode = opcode;
4432 /* Insert at the head. */
4433 record->next = operand_error_report.head;
4434 operand_error_report.head = record;
4435 if (operand_error_report.tail == NULL)
4436 operand_error_report.tail = record;
4438 else if (record->detail.kind != AARCH64_OPDE_NIL
4439 && record->detail.index <= new_record->detail.index
4440 && operand_error_higher_severity_p (record->detail.kind,
4441 new_record->detail.kind))
4443 /* In the case of multiple errors found on operands related with a
4444 single opcode, only record the error of the leftmost operand and
4445 only if the error is of higher severity. */
4446 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
4447 " the existing error %s on operand %d",
4448 operand_mismatch_kind_names[new_record->detail.kind],
4449 new_record->detail.index,
4450 operand_mismatch_kind_names[record->detail.kind],
4451 record->detail.index);
4455 record->detail = new_record->detail;
4459 record_operand_error_info (const aarch64_opcode *opcode,
4460 aarch64_operand_error *error_info)
4462 operand_error_record record;
4463 record.opcode = opcode;
4464 record.detail = *error_info;
4465 add_operand_error_record (&record);
4468 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4469 error message *ERROR, for operand IDX (count from 0). */
4472 record_operand_error (const aarch64_opcode *opcode, int idx,
4473 enum aarch64_operand_error_kind kind,
4476 aarch64_operand_error info;
4477 memset(&info, 0, sizeof (info));
4481 info.non_fatal = FALSE;
4482 record_operand_error_info (opcode, &info);
4486 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4487 enum aarch64_operand_error_kind kind,
4488 const char* error, const int *extra_data)
4490 aarch64_operand_error info;
4494 info.data[0] = extra_data[0];
4495 info.data[1] = extra_data[1];
4496 info.data[2] = extra_data[2];
4497 info.non_fatal = FALSE;
4498 record_operand_error_info (opcode, &info);
4502 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4503 const char* error, int lower_bound,
4506 int data[3] = {lower_bound, upper_bound, 0};
4507 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4511 /* Remove the operand error record for *OPCODE. */
4512 static void ATTRIBUTE_UNUSED
4513 remove_operand_error_record (const aarch64_opcode *opcode)
4515 if (opcode_has_operand_error_p (opcode))
4517 operand_error_record* record = operand_error_report.head;
4518 gas_assert (record != NULL && operand_error_report.tail != NULL);
4519 operand_error_report.head = record->next;
4520 record->next = free_opnd_error_record_nodes;
4521 free_opnd_error_record_nodes = record;
4522 if (operand_error_report.head == NULL)
4524 gas_assert (operand_error_report.tail == record);
4525 operand_error_report.tail = NULL;
4530 /* Given the instruction in *INSTR, return the index of the best matched
4531 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4533 Return -1 if there is no qualifier sequence; return the first match
4534 if there is multiple matches found. */
4537 find_best_match (const aarch64_inst *instr,
4538 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4540 int i, num_opnds, max_num_matched, idx;
4542 num_opnds = aarch64_num_of_operands (instr->opcode);
4545 DEBUG_TRACE ("no operand");
4549 max_num_matched = 0;
4552 /* For each pattern. */
4553 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4556 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4558 /* Most opcodes has much fewer patterns in the list. */
4559 if (empty_qualifier_sequence_p (qualifiers))
4561 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4565 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4566 if (*qualifiers == instr->operands[j].qualifier)
4569 if (num_matched > max_num_matched)
4571 max_num_matched = num_matched;
4576 DEBUG_TRACE ("return with %d", idx);
4580 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4581 corresponding operands in *INSTR. */
4584 assign_qualifier_sequence (aarch64_inst *instr,
4585 const aarch64_opnd_qualifier_t *qualifiers)
4588 int num_opnds = aarch64_num_of_operands (instr->opcode);
4589 gas_assert (num_opnds);
4590 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4591 instr->operands[i].qualifier = *qualifiers;
4594 /* Print operands for the diagnosis purpose. */
4597 print_operands (char *buf, const aarch64_opcode *opcode,
4598 const aarch64_opnd_info *opnds)
4602 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4606 /* We regard the opcode operand info more, however we also look into
4607 the inst->operands to support the disassembling of the optional
4609 The two operand code should be the same in all cases, apart from
4610 when the operand can be optional. */
4611 if (opcode->operands[i] == AARCH64_OPND_NIL
4612 || opnds[i].type == AARCH64_OPND_NIL)
4615 /* Generate the operand string in STR. */
4616 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
4621 strcat (buf, i == 0 ? " " : ", ");
4623 /* Append the operand string. */
4628 /* Send to stderr a string as information. */
4631 output_info (const char *format, ...)
4637 file = as_where (&line);
4641 fprintf (stderr, "%s:%u: ", file, line);
4643 fprintf (stderr, "%s: ", file);
4645 fprintf (stderr, _("Info: "));
4646 va_start (args, format);
4647 vfprintf (stderr, format, args);
4649 (void) putc ('\n', stderr);
4652 /* Output one operand error record. */
4655 output_operand_error_record (const operand_error_record *record, char *str)
4657 const aarch64_operand_error *detail = &record->detail;
4658 int idx = detail->index;
4659 const aarch64_opcode *opcode = record->opcode;
4660 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4661 : AARCH64_OPND_NIL);
4663 typedef void (*handler_t)(const char *format, ...);
4664 handler_t handler = detail->non_fatal ? as_warn : as_bad;
4666 switch (detail->kind)
4668 case AARCH64_OPDE_NIL:
4671 case AARCH64_OPDE_SYNTAX_ERROR:
4672 case AARCH64_OPDE_RECOVERABLE:
4673 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4674 case AARCH64_OPDE_OTHER_ERROR:
4675 /* Use the prepared error message if there is, otherwise use the
4676 operand description string to describe the error. */
4677 if (detail->error != NULL)
4680 handler (_("%s -- `%s'"), detail->error, str);
4682 handler (_("%s at operand %d -- `%s'"),
4683 detail->error, idx + 1, str);
4687 gas_assert (idx >= 0);
4688 handler (_("operand %d must be %s -- `%s'"), idx + 1,
4689 aarch64_get_operand_desc (opd_code), str);
4693 case AARCH64_OPDE_INVALID_VARIANT:
4694 handler (_("operand mismatch -- `%s'"), str);
4695 if (verbose_error_p)
4697 /* We will try to correct the erroneous instruction and also provide
4698 more information e.g. all other valid variants.
4700 The string representation of the corrected instruction and other
4701 valid variants are generated by
4703 1) obtaining the intermediate representation of the erroneous
4705 2) manipulating the IR, e.g. replacing the operand qualifier;
4706 3) printing out the instruction by calling the printer functions
4707 shared with the disassembler.
4709 The limitation of this method is that the exact input assembly
4710 line cannot be accurately reproduced in some cases, for example an
4711 optional operand present in the actual assembly line will be
4712 omitted in the output; likewise for the optional syntax rules,
4713 e.g. the # before the immediate. Another limitation is that the
4714 assembly symbols and relocation operations in the assembly line
4715 currently cannot be printed out in the error report. Last but not
4716 least, when there is other error(s) co-exist with this error, the
4717 'corrected' instruction may be still incorrect, e.g. given
4718 'ldnp h0,h1,[x0,#6]!'
4719 this diagnosis will provide the version:
4720 'ldnp s0,s1,[x0,#6]!'
4721 which is still not right. */
4722 size_t len = strlen (get_mnemonic_name (str));
4726 aarch64_inst *inst_base = &inst.base;
4727 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4730 reset_aarch64_instruction (&inst);
4731 inst_base->opcode = opcode;
4733 /* Reset the error report so that there is no side effect on the
4734 following operand parsing. */
4735 init_operand_error_report ();
4738 result = parse_operands (str + len, opcode)
4739 && programmer_friendly_fixup (&inst);
4740 gas_assert (result);
4741 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4742 NULL, NULL, insn_sequence);
4743 gas_assert (!result);
4745 /* Find the most matched qualifier sequence. */
4746 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4747 gas_assert (qlf_idx > -1);
4749 /* Assign the qualifiers. */
4750 assign_qualifier_sequence (inst_base,
4751 opcode->qualifiers_list[qlf_idx]);
4753 /* Print the hint. */
4754 output_info (_(" did you mean this?"));
4755 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4756 print_operands (buf, opcode, inst_base->operands);
4757 output_info (_(" %s"), buf);
4759 /* Print out other variant(s) if there is any. */
4761 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4762 output_info (_(" other valid variant(s):"));
4764 /* For each pattern. */
4765 qualifiers_list = opcode->qualifiers_list;
4766 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4768 /* Most opcodes has much fewer patterns in the list.
4769 First NIL qualifier indicates the end in the list. */
4770 if (empty_qualifier_sequence_p (*qualifiers_list))
4775 /* Mnemonics name. */
4776 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4778 /* Assign the qualifiers. */
4779 assign_qualifier_sequence (inst_base, *qualifiers_list);
4781 /* Print instruction. */
4782 print_operands (buf, opcode, inst_base->operands);
4784 output_info (_(" %s"), buf);
4790 case AARCH64_OPDE_UNTIED_OPERAND:
4791 handler (_("operand %d must be the same register as operand 1 -- `%s'"),
4792 detail->index + 1, str);
4795 case AARCH64_OPDE_OUT_OF_RANGE:
4796 if (detail->data[0] != detail->data[1])
4797 handler (_("%s out of range %d to %d at operand %d -- `%s'"),
4798 detail->error ? detail->error : _("immediate value"),
4799 detail->data[0], detail->data[1], idx + 1, str);
4801 handler (_("%s must be %d at operand %d -- `%s'"),
4802 detail->error ? detail->error : _("immediate value"),
4803 detail->data[0], idx + 1, str);
4806 case AARCH64_OPDE_REG_LIST:
4807 if (detail->data[0] == 1)
4808 handler (_("invalid number of registers in the list; "
4809 "only 1 register is expected at operand %d -- `%s'"),
4812 handler (_("invalid number of registers in the list; "
4813 "%d registers are expected at operand %d -- `%s'"),
4814 detail->data[0], idx + 1, str);
4817 case AARCH64_OPDE_UNALIGNED:
4818 handler (_("immediate value must be a multiple of "
4819 "%d at operand %d -- `%s'"),
4820 detail->data[0], idx + 1, str);
4829 /* Process and output the error message about the operand mismatching.
4831 When this function is called, the operand error information had
4832 been collected for an assembly line and there will be multiple
4833 errors in the case of multiple instruction templates; output the
4834 error message that most closely describes the problem.
4836 The errors to be printed can be filtered on printing all errors
4837 or only non-fatal errors. This distinction has to be made because
4838 the error buffer may already be filled with fatal errors we don't want to
4839 print due to the different instruction templates. */
4842 output_operand_error_report (char *str, bfd_boolean non_fatal_only)
4844 int largest_error_pos;
4845 const char *msg = NULL;
4846 enum aarch64_operand_error_kind kind;
4847 operand_error_record *curr;
4848 operand_error_record *head = operand_error_report.head;
4849 operand_error_record *record = NULL;
4851 /* No error to report. */
4855 gas_assert (head != NULL && operand_error_report.tail != NULL);
4857 /* Only one error. */
4858 if (head == operand_error_report.tail)
4860 /* If the only error is a non-fatal one and we don't want to print it,
4862 if (!non_fatal_only || head->detail.non_fatal)
4864 DEBUG_TRACE ("single opcode entry with error kind: %s",
4865 operand_mismatch_kind_names[head->detail.kind]);
4866 output_operand_error_record (head, str);
4871 /* Find the error kind of the highest severity. */
4872 DEBUG_TRACE ("multiple opcode entries with error kind");
4873 kind = AARCH64_OPDE_NIL;
4874 for (curr = head; curr != NULL; curr = curr->next)
4876 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4877 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4878 if (operand_error_higher_severity_p (curr->detail.kind, kind)
4879 && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
4880 kind = curr->detail.kind;
4883 gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);
4885 /* Pick up one of errors of KIND to report. */
4886 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4887 for (curr = head; curr != NULL; curr = curr->next)
4889 /* If we don't want to print non-fatal errors then don't consider them
4891 if (curr->detail.kind != kind
4892 || (non_fatal_only && !curr->detail.non_fatal))
4894 /* If there are multiple errors, pick up the one with the highest
4895 mismatching operand index. In the case of multiple errors with
4896 the equally highest operand index, pick up the first one or the
4897 first one with non-NULL error message. */
4898 if (curr->detail.index > largest_error_pos
4899 || (curr->detail.index == largest_error_pos && msg == NULL
4900 && curr->detail.error != NULL))
4902 largest_error_pos = curr->detail.index;
4904 msg = record->detail.error;
4908 /* The way errors are collected in the back-end is a bit non-intuitive. But
4909 essentially, because each operand template is tried recursively you may
4910 always have errors collected from the previous tried OPND. These are
4911 usually skipped if there is one successful match. However now with the
4912 non-fatal errors we have to ignore those previously collected hard errors
4913 when we're only interested in printing the non-fatal ones. This condition
4914 prevents us from printing errors that are not appropriate, since we did
4915 match a condition, but it also has warnings that it wants to print. */
4916 if (non_fatal_only && !record)
4919 gas_assert (largest_error_pos != -2 && record != NULL);
4920 DEBUG_TRACE ("Pick up error kind %s to report",
4921 operand_mismatch_kind_names[record->detail.kind]);
4924 output_operand_error_record (record, str);
4927 /* Write an AARCH64 instruction to buf - always little-endian. */
4929 put_aarch64_insn (char *buf, uint32_t insn)
4931 unsigned char *where = (unsigned char *) buf;
4933 where[1] = insn >> 8;
4934 where[2] = insn >> 16;
4935 where[3] = insn >> 24;
4939 get_aarch64_insn (char *buf)
4941 unsigned char *where = (unsigned char *) buf;
4943 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4948 output_inst (struct aarch64_inst *new_inst)
4952 to = frag_more (INSN_SIZE);
4954 frag_now->tc_frag_data.recorded = 1;
4956 put_aarch64_insn (to, inst.base.value);
4958 if (inst.reloc.type != BFD_RELOC_UNUSED)
4960 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4961 INSN_SIZE, &inst.reloc.exp,
4964 DEBUG_TRACE ("Prepared relocation fix up");
4965 /* Don't check the addend value against the instruction size,
4966 that's the job of our code in md_apply_fix(). */
4967 fixp->fx_no_overflow = 1;
4968 if (new_inst != NULL)
4969 fixp->tc_fix_data.inst = new_inst;
4970 if (aarch64_gas_internal_fixup_p ())
4972 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4973 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4974 fixp->fx_addnumber = inst.reloc.flags;
4978 dwarf2_emit_insn (INSN_SIZE);
4981 /* Link together opcodes of the same name. */
4985 aarch64_opcode *opcode;
4986 struct templates *next;
4989 typedef struct templates templates;
4992 lookup_mnemonic (const char *start, int len)
4994 templates *templ = NULL;
4996 templ = hash_find_n (aarch64_ops_hsh, start, len);
5000 /* Subroutine of md_assemble, responsible for looking up the primary
5001 opcode from the mnemonic the user wrote. STR points to the
5002 beginning of the mnemonic. */
5005 opcode_lookup (char **str)
5007 char *end, *base, *dot;
5008 const aarch64_cond *cond;
5012 /* Scan up to the end of the mnemonic, which must end in white space,
5013 '.', or end of string. */
5015 for (base = end = *str; is_part_of_name(*end); end++)
5016 if (*end == '.' && !dot)
5019 if (end == base || dot == base)
5022 inst.cond = COND_ALWAYS;
5024 /* Handle a possible condition. */
5027 cond = hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5030 inst.cond = cond->value;
5046 if (inst.cond == COND_ALWAYS)
5048 /* Look for unaffixed mnemonic. */
5049 return lookup_mnemonic (base, len);
5053 /* append ".c" to mnemonic if conditional */
5054 memcpy (condname, base, len);
5055 memcpy (condname + len, ".c", 2);
5058 return lookup_mnemonic (base, len);
5064 /* Internal helper routine converting a vector_type_el structure *VECTYPE
5065 to a corresponding operand qualifier. */
5067 static inline aarch64_opnd_qualifier_t
5068 vectype_to_qualifier (const struct vector_type_el *vectype)
5070 /* Element size in bytes indexed by vector_el_type. */
5071 const unsigned char ele_size[5]
5073 const unsigned int ele_base [5] =
5075 AARCH64_OPND_QLF_V_4B,
5076 AARCH64_OPND_QLF_V_2H,
5077 AARCH64_OPND_QLF_V_2S,
5078 AARCH64_OPND_QLF_V_1D,
5079 AARCH64_OPND_QLF_V_1Q
5082 if (!vectype->defined || vectype->type == NT_invtype)
5083 goto vectype_conversion_fail;
5085 if (vectype->type == NT_zero)
5086 return AARCH64_OPND_QLF_P_Z;
5087 if (vectype->type == NT_merge)
5088 return AARCH64_OPND_QLF_P_M;
5090 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
5092 if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
5094 /* Special case S_4B. */
5095 if (vectype->type == NT_b && vectype->width == 4)
5096 return AARCH64_OPND_QLF_S_4B;
5098 /* Vector element register. */
5099 return AARCH64_OPND_QLF_S_B + vectype->type;
5103 /* Vector register. */
5104 int reg_size = ele_size[vectype->type] * vectype->width;
5107 if (reg_size != 16 && reg_size != 8 && reg_size != 4)
5108 goto vectype_conversion_fail;
5110 /* The conversion is by calculating the offset from the base operand
5111 qualifier for the vector type. The operand qualifiers are regular
5112 enough that the offset can established by shifting the vector width by
5113 a vector-type dependent amount. */
5115 if (vectype->type == NT_b)
5117 else if (vectype->type == NT_h || vectype->type == NT_s)
5119 else if (vectype->type >= NT_d)
5124 offset = ele_base [vectype->type] + (vectype->width >> shift);
5125 gas_assert (AARCH64_OPND_QLF_V_4B <= offset
5126 && offset <= AARCH64_OPND_QLF_V_1Q);
5130 vectype_conversion_fail:
5131 first_error (_("bad vector arrangement type"));
5132 return AARCH64_OPND_QLF_NIL;
5135 /* Process an optional operand that is found omitted from the assembly line.
5136 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
5137 instruction's opcode entry while IDX is the index of this omitted operand.
5141 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
5142 int idx, aarch64_opnd_info *operand)
5144 aarch64_insn default_value = get_optional_operand_default_value (opcode);
5145 gas_assert (optional_operand_p (opcode, idx));
5146 gas_assert (!operand->present);
5150 case AARCH64_OPND_Rd:
5151 case AARCH64_OPND_Rn:
5152 case AARCH64_OPND_Rm:
5153 case AARCH64_OPND_Rt:
5154 case AARCH64_OPND_Rt2:
5155 case AARCH64_OPND_Rt_SP:
5156 case AARCH64_OPND_Rs:
5157 case AARCH64_OPND_Ra:
5158 case AARCH64_OPND_Rt_SYS:
5159 case AARCH64_OPND_Rd_SP:
5160 case AARCH64_OPND_Rn_SP:
5161 case AARCH64_OPND_Rm_SP:
5162 case AARCH64_OPND_Fd:
5163 case AARCH64_OPND_Fn:
5164 case AARCH64_OPND_Fm:
5165 case AARCH64_OPND_Fa:
5166 case AARCH64_OPND_Ft:
5167 case AARCH64_OPND_Ft2:
5168 case AARCH64_OPND_Sd:
5169 case AARCH64_OPND_Sn:
5170 case AARCH64_OPND_Sm:
5171 case AARCH64_OPND_Va:
5172 case AARCH64_OPND_Vd:
5173 case AARCH64_OPND_Vn:
5174 case AARCH64_OPND_Vm:
5175 case AARCH64_OPND_VdD1:
5176 case AARCH64_OPND_VnD1:
5177 operand->reg.regno = default_value;
5180 case AARCH64_OPND_Ed:
5181 case AARCH64_OPND_En:
5182 case AARCH64_OPND_Em:
5183 case AARCH64_OPND_Em16:
5184 case AARCH64_OPND_SM3_IMM2:
5185 operand->reglane.regno = default_value;
5188 case AARCH64_OPND_IDX:
5189 case AARCH64_OPND_BIT_NUM:
5190 case AARCH64_OPND_IMMR:
5191 case AARCH64_OPND_IMMS:
5192 case AARCH64_OPND_SHLL_IMM:
5193 case AARCH64_OPND_IMM_VLSL:
5194 case AARCH64_OPND_IMM_VLSR:
5195 case AARCH64_OPND_CCMP_IMM:
5196 case AARCH64_OPND_FBITS:
5197 case AARCH64_OPND_UIMM4:
5198 case AARCH64_OPND_UIMM3_OP1:
5199 case AARCH64_OPND_UIMM3_OP2:
5200 case AARCH64_OPND_IMM:
5201 case AARCH64_OPND_IMM_2:
5202 case AARCH64_OPND_WIDTH:
5203 case AARCH64_OPND_UIMM7:
5204 case AARCH64_OPND_NZCV:
5205 case AARCH64_OPND_SVE_PATTERN:
5206 case AARCH64_OPND_SVE_PRFOP:
5207 operand->imm.value = default_value;
5210 case AARCH64_OPND_SVE_PATTERN_SCALED:
5211 operand->imm.value = default_value;
5212 operand->shifter.kind = AARCH64_MOD_MUL;
5213 operand->shifter.amount = 1;
5216 case AARCH64_OPND_EXCEPTION:
5217 inst.reloc.type = BFD_RELOC_UNUSED;
5220 case AARCH64_OPND_BARRIER_ISB:
5221 operand->barrier = aarch64_barrier_options + default_value;
5224 case AARCH64_OPND_BTI_TARGET:
5225 operand->hint_option = aarch64_hint_options + default_value;
5233 /* Process the relocation type for move wide instructions.
5234 Return TRUE on success; otherwise return FALSE. */
5237 process_movw_reloc_info (void)
5242 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
5244 if (inst.base.opcode->op == OP_MOVK)
5245 switch (inst.reloc.type)
5247 case BFD_RELOC_AARCH64_MOVW_G0_S:
5248 case BFD_RELOC_AARCH64_MOVW_G1_S:
5249 case BFD_RELOC_AARCH64_MOVW_G2_S:
5250 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
5251 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
5252 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
5253 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
5254 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5255 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5256 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5257 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5259 (_("the specified relocation type is not allowed for MOVK"));
5265 switch (inst.reloc.type)
5267 case BFD_RELOC_AARCH64_MOVW_G0:
5268 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5269 case BFD_RELOC_AARCH64_MOVW_G0_S:
5270 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5271 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
5272 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
5273 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5274 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5275 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5276 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
5277 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5278 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5279 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5282 case BFD_RELOC_AARCH64_MOVW_G1:
5283 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5284 case BFD_RELOC_AARCH64_MOVW_G1_S:
5285 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5286 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
5287 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
5288 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5289 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5290 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5291 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
5292 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
5293 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5294 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5297 case BFD_RELOC_AARCH64_MOVW_G2:
5298 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5299 case BFD_RELOC_AARCH64_MOVW_G2_S:
5300 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
5301 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
5302 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
5303 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5306 set_fatal_syntax_error
5307 (_("the specified relocation type is not allowed for 32-bit "
5313 case BFD_RELOC_AARCH64_MOVW_G3:
5314 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
5317 set_fatal_syntax_error
5318 (_("the specified relocation type is not allowed for 32-bit "
5325 /* More cases should be added when more MOVW-related relocation types
5326 are supported in GAS. */
5327 gas_assert (aarch64_gas_internal_fixup_p ());
5328 /* The shift amount should have already been set by the parser. */
5331 inst.base.operands[1].shifter.amount = shift;
5335 /* A primitive log calculator. */
5337 static inline unsigned int
5338 get_logsz (unsigned int size)
5340 const unsigned char ls[16] =
5341 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
5347 gas_assert (ls[size - 1] != (unsigned char)-1);
5348 return ls[size - 1];
5351 /* Determine and return the real reloc type code for an instruction
5352 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
5354 static inline bfd_reloc_code_real_type
5355 ldst_lo12_determine_real_reloc_type (void)
5358 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
5359 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
5361 const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
5363 BFD_RELOC_AARCH64_LDST8_LO12,
5364 BFD_RELOC_AARCH64_LDST16_LO12,
5365 BFD_RELOC_AARCH64_LDST32_LO12,
5366 BFD_RELOC_AARCH64_LDST64_LO12,
5367 BFD_RELOC_AARCH64_LDST128_LO12
5370 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
5371 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
5372 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
5373 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
5374 BFD_RELOC_AARCH64_NONE
5377 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
5378 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
5379 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
5380 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
5381 BFD_RELOC_AARCH64_NONE
5384 BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
5385 BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
5386 BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
5387 BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
5388 BFD_RELOC_AARCH64_NONE
5391 BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
5392 BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
5393 BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
5394 BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
5395 BFD_RELOC_AARCH64_NONE
5399 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5400 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
5402 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
5404 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
5406 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
5407 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
5409 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
5411 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
5413 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
5415 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
5416 if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
5417 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
5418 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
5419 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
5420 gas_assert (logsz <= 3);
5422 gas_assert (logsz <= 4);
5424 /* In reloc.c, these pseudo relocation types should be defined in similar
5425 order as above reloc_ldst_lo12 array. Because the array index calculation
5426 below relies on this. */
5427 return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
5430 /* Check whether a register list REGINFO is valid. The registers must be
5431 numbered in increasing order (modulo 32), in increments of one or two.
5433 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5436 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5439 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5441 uint32_t i, nb_regs, prev_regno, incr;
5443 nb_regs = 1 + (reginfo & 0x3);
5445 prev_regno = reginfo & 0x1f;
5446 incr = accept_alternate ? 2 : 1;
5448 for (i = 1; i < nb_regs; ++i)
5450 uint32_t curr_regno;
5452 curr_regno = reginfo & 0x1f;
5453 if (curr_regno != ((prev_regno + incr) & 0x1f))
5455 prev_regno = curr_regno;
5461 /* Generic instruction operand parser. This does no encoding and no
5462 semantic validation; it merely squirrels values away in the inst
5463 structure. Returns TRUE or FALSE depending on whether the
5464 specified grammar matched. */
5467 parse_operands (char *str, const aarch64_opcode *opcode)
5470 char *backtrack_pos = 0;
5471 const enum aarch64_opnd *operands = opcode->operands;
5472 aarch64_reg_type imm_reg_type;
5475 skip_whitespace (str);
5477 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5478 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5480 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5482 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5485 const reg_entry *reg;
5486 int comma_skipped_p = 0;
5487 aarch64_reg_type rtype;
5488 struct vector_type_el vectype;
5489 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5490 aarch64_opnd_info *info = &inst.base.operands[i];
5491 aarch64_reg_type reg_type;
5493 DEBUG_TRACE ("parse operand %d", i);
5495 /* Assign the operand code. */
5496 info->type = operands[i];
5498 if (optional_operand_p (opcode, i))
5500 /* Remember where we are in case we need to backtrack. */
5501 gas_assert (!backtrack_pos);
5502 backtrack_pos = str;
5505 /* Expect comma between operands; the backtrack mechanism will take
5506 care of cases of omitted optional operand. */
5507 if (i > 0 && ! skip_past_char (&str, ','))
5509 set_syntax_error (_("comma expected between operands"));
5513 comma_skipped_p = 1;
5515 switch (operands[i])
5517 case AARCH64_OPND_Rd:
5518 case AARCH64_OPND_Rn:
5519 case AARCH64_OPND_Rm:
5520 case AARCH64_OPND_Rt:
5521 case AARCH64_OPND_Rt2:
5522 case AARCH64_OPND_Rs:
5523 case AARCH64_OPND_Ra:
5524 case AARCH64_OPND_Rt_SYS:
5525 case AARCH64_OPND_PAIRREG:
5526 case AARCH64_OPND_SVE_Rm:
5527 po_int_reg_or_fail (REG_TYPE_R_Z);
5530 case AARCH64_OPND_Rd_SP:
5531 case AARCH64_OPND_Rn_SP:
5532 case AARCH64_OPND_Rt_SP:
5533 case AARCH64_OPND_SVE_Rn_SP:
5534 case AARCH64_OPND_Rm_SP:
5535 po_int_reg_or_fail (REG_TYPE_R_SP);
5538 case AARCH64_OPND_Rm_EXT:
5539 case AARCH64_OPND_Rm_SFT:
5540 po_misc_or_fail (parse_shifter_operand
5541 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5543 : SHIFTED_LOGIC_IMM)));
5544 if (!info->shifter.operator_present)
5546 /* Default to LSL if not present. Libopcodes prefers shifter
5547 kind to be explicit. */
5548 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5549 info->shifter.kind = AARCH64_MOD_LSL;
5550 /* For Rm_EXT, libopcodes will carry out further check on whether
5551 or not stack pointer is used in the instruction (Recall that
5552 "the extend operator is not optional unless at least one of
5553 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5557 case AARCH64_OPND_Fd:
5558 case AARCH64_OPND_Fn:
5559 case AARCH64_OPND_Fm:
5560 case AARCH64_OPND_Fa:
5561 case AARCH64_OPND_Ft:
5562 case AARCH64_OPND_Ft2:
5563 case AARCH64_OPND_Sd:
5564 case AARCH64_OPND_Sn:
5565 case AARCH64_OPND_Sm:
5566 case AARCH64_OPND_SVE_VZn:
5567 case AARCH64_OPND_SVE_Vd:
5568 case AARCH64_OPND_SVE_Vm:
5569 case AARCH64_OPND_SVE_Vn:
5570 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5571 if (val == PARSE_FAIL)
5573 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5576 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5578 info->reg.regno = val;
5579 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5582 case AARCH64_OPND_SVE_Pd:
5583 case AARCH64_OPND_SVE_Pg3:
5584 case AARCH64_OPND_SVE_Pg4_5:
5585 case AARCH64_OPND_SVE_Pg4_10:
5586 case AARCH64_OPND_SVE_Pg4_16:
5587 case AARCH64_OPND_SVE_Pm:
5588 case AARCH64_OPND_SVE_Pn:
5589 case AARCH64_OPND_SVE_Pt:
5590 reg_type = REG_TYPE_PN;
5593 case AARCH64_OPND_SVE_Za_5:
5594 case AARCH64_OPND_SVE_Za_16:
5595 case AARCH64_OPND_SVE_Zd:
5596 case AARCH64_OPND_SVE_Zm_5:
5597 case AARCH64_OPND_SVE_Zm_16:
5598 case AARCH64_OPND_SVE_Zn:
5599 case AARCH64_OPND_SVE_Zt:
5600 reg_type = REG_TYPE_ZN;
5603 case AARCH64_OPND_Va:
5604 case AARCH64_OPND_Vd:
5605 case AARCH64_OPND_Vn:
5606 case AARCH64_OPND_Vm:
5607 reg_type = REG_TYPE_VN;
5609 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5610 if (val == PARSE_FAIL)
5612 first_error (_(get_reg_expected_msg (reg_type)));
5615 if (vectype.defined & NTA_HASINDEX)
5618 info->reg.regno = val;
5619 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5620 && vectype.type == NT_invtype)
5621 /* Unqualified Pn and Zn registers are allowed in certain
5622 contexts. Rely on F_STRICT qualifier checking to catch
5624 info->qualifier = AARCH64_OPND_QLF_NIL;
5627 info->qualifier = vectype_to_qualifier (&vectype);
5628 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5633 case AARCH64_OPND_VdD1:
5634 case AARCH64_OPND_VnD1:
5635 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5636 if (val == PARSE_FAIL)
5638 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5641 if (vectype.type != NT_d || vectype.index != 1)
5643 set_fatal_syntax_error
5644 (_("the top half of a 128-bit FP/SIMD register is expected"));
5647 info->reg.regno = val;
5648 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5649 here; it is correct for the purpose of encoding/decoding since
5650 only the register number is explicitly encoded in the related
5651 instructions, although this appears a bit hacky. */
5652 info->qualifier = AARCH64_OPND_QLF_S_D;
5655 case AARCH64_OPND_SVE_Zm3_INDEX:
5656 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5657 case AARCH64_OPND_SVE_Zm3_11_INDEX:
5658 case AARCH64_OPND_SVE_Zm4_11_INDEX:
5659 case AARCH64_OPND_SVE_Zm4_INDEX:
5660 case AARCH64_OPND_SVE_Zn_INDEX:
5661 reg_type = REG_TYPE_ZN;
5662 goto vector_reg_index;
5664 case AARCH64_OPND_Ed:
5665 case AARCH64_OPND_En:
5666 case AARCH64_OPND_Em:
5667 case AARCH64_OPND_Em16:
5668 case AARCH64_OPND_SM3_IMM2:
5669 reg_type = REG_TYPE_VN;
5671 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5672 if (val == PARSE_FAIL)
5674 first_error (_(get_reg_expected_msg (reg_type)));
5677 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5680 info->reglane.regno = val;
5681 info->reglane.index = vectype.index;
5682 info->qualifier = vectype_to_qualifier (&vectype);
5683 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5687 case AARCH64_OPND_SVE_ZnxN:
5688 case AARCH64_OPND_SVE_ZtxN:
5689 reg_type = REG_TYPE_ZN;
5690 goto vector_reg_list;
5692 case AARCH64_OPND_LVn:
5693 case AARCH64_OPND_LVt:
5694 case AARCH64_OPND_LVt_AL:
5695 case AARCH64_OPND_LEt:
5696 reg_type = REG_TYPE_VN;
5698 if (reg_type == REG_TYPE_ZN
5699 && get_opcode_dependent_value (opcode) == 1
5702 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5703 if (val == PARSE_FAIL)
5705 first_error (_(get_reg_expected_msg (reg_type)));
5708 info->reglist.first_regno = val;
5709 info->reglist.num_regs = 1;
5713 val = parse_vector_reg_list (&str, reg_type, &vectype);
5714 if (val == PARSE_FAIL)
5716 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5718 set_fatal_syntax_error (_("invalid register list"));
5721 info->reglist.first_regno = (val >> 2) & 0x1f;
5722 info->reglist.num_regs = (val & 0x3) + 1;
5724 if (operands[i] == AARCH64_OPND_LEt)
5726 if (!(vectype.defined & NTA_HASINDEX))
5728 info->reglist.has_index = 1;
5729 info->reglist.index = vectype.index;
5733 if (vectype.defined & NTA_HASINDEX)
5735 if (!(vectype.defined & NTA_HASTYPE))
5737 if (reg_type == REG_TYPE_ZN)
5738 set_fatal_syntax_error (_("missing type suffix"));
5742 info->qualifier = vectype_to_qualifier (&vectype);
5743 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5747 case AARCH64_OPND_CRn:
5748 case AARCH64_OPND_CRm:
5750 char prefix = *(str++);
5751 if (prefix != 'c' && prefix != 'C')
5754 po_imm_nc_or_fail ();
5757 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5760 info->qualifier = AARCH64_OPND_QLF_CR;
5761 info->imm.value = val;
5765 case AARCH64_OPND_SHLL_IMM:
5766 case AARCH64_OPND_IMM_VLSR:
5767 po_imm_or_fail (1, 64);
5768 info->imm.value = val;
5771 case AARCH64_OPND_CCMP_IMM:
5772 case AARCH64_OPND_SIMM5:
5773 case AARCH64_OPND_FBITS:
5774 case AARCH64_OPND_TME_UIMM16:
5775 case AARCH64_OPND_UIMM4:
5776 case AARCH64_OPND_UIMM4_ADDG:
5777 case AARCH64_OPND_UIMM10:
5778 case AARCH64_OPND_UIMM3_OP1:
5779 case AARCH64_OPND_UIMM3_OP2:
5780 case AARCH64_OPND_IMM_VLSL:
5781 case AARCH64_OPND_IMM:
5782 case AARCH64_OPND_IMM_2:
5783 case AARCH64_OPND_WIDTH:
5784 case AARCH64_OPND_SVE_INV_LIMM:
5785 case AARCH64_OPND_SVE_LIMM:
5786 case AARCH64_OPND_SVE_LIMM_MOV:
5787 case AARCH64_OPND_SVE_SHLIMM_PRED:
5788 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5789 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
5790 case AARCH64_OPND_SVE_SHRIMM_PRED:
5791 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5792 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
5793 case AARCH64_OPND_SVE_SIMM5:
5794 case AARCH64_OPND_SVE_SIMM5B:
5795 case AARCH64_OPND_SVE_SIMM6:
5796 case AARCH64_OPND_SVE_SIMM8:
5797 case AARCH64_OPND_SVE_UIMM3:
5798 case AARCH64_OPND_SVE_UIMM7:
5799 case AARCH64_OPND_SVE_UIMM8:
5800 case AARCH64_OPND_SVE_UIMM8_53:
5801 case AARCH64_OPND_IMM_ROT1:
5802 case AARCH64_OPND_IMM_ROT2:
5803 case AARCH64_OPND_IMM_ROT3:
5804 case AARCH64_OPND_SVE_IMM_ROT1:
5805 case AARCH64_OPND_SVE_IMM_ROT2:
5806 case AARCH64_OPND_SVE_IMM_ROT3:
5807 po_imm_nc_or_fail ();
5808 info->imm.value = val;
5811 case AARCH64_OPND_SVE_AIMM:
5812 case AARCH64_OPND_SVE_ASIMM:
5813 po_imm_nc_or_fail ();
5814 info->imm.value = val;
5815 skip_whitespace (str);
5816 if (skip_past_comma (&str))
5817 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5819 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5822 case AARCH64_OPND_SVE_PATTERN:
5823 po_enum_or_fail (aarch64_sve_pattern_array);
5824 info->imm.value = val;
5827 case AARCH64_OPND_SVE_PATTERN_SCALED:
5828 po_enum_or_fail (aarch64_sve_pattern_array);
5829 info->imm.value = val;
5830 if (skip_past_comma (&str)
5831 && !parse_shift (&str, info, SHIFTED_MUL))
5833 if (!info->shifter.operator_present)
5835 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5836 info->shifter.kind = AARCH64_MOD_MUL;
5837 info->shifter.amount = 1;
5841 case AARCH64_OPND_SVE_PRFOP:
5842 po_enum_or_fail (aarch64_sve_prfop_array);
5843 info->imm.value = val;
5846 case AARCH64_OPND_UIMM7:
5847 po_imm_or_fail (0, 127);
5848 info->imm.value = val;
5851 case AARCH64_OPND_IDX:
5852 case AARCH64_OPND_MASK:
5853 case AARCH64_OPND_BIT_NUM:
5854 case AARCH64_OPND_IMMR:
5855 case AARCH64_OPND_IMMS:
5856 po_imm_or_fail (0, 63);
5857 info->imm.value = val;
5860 case AARCH64_OPND_IMM0:
5861 po_imm_nc_or_fail ();
5864 set_fatal_syntax_error (_("immediate zero expected"));
5867 info->imm.value = 0;
5870 case AARCH64_OPND_FPIMM0:
5873 bfd_boolean res1 = FALSE, res2 = FALSE;
5874 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5875 it is probably not worth the effort to support it. */
5876 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5879 || !(res2 = parse_constant_immediate (&str, &val,
5882 if ((res1 && qfloat == 0) || (res2 && val == 0))
5884 info->imm.value = 0;
5885 info->imm.is_fp = 1;
5888 set_fatal_syntax_error (_("immediate zero expected"));
5892 case AARCH64_OPND_IMM_MOV:
5895 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5896 reg_name_p (str, REG_TYPE_VN))
5899 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5901 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5902 later. fix_mov_imm_insn will try to determine a machine
5903 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5904 message if the immediate cannot be moved by a single
5906 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5907 inst.base.operands[i].skip = 1;
5911 case AARCH64_OPND_SIMD_IMM:
5912 case AARCH64_OPND_SIMD_IMM_SFT:
5913 if (! parse_big_immediate (&str, &val, imm_reg_type))
5915 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5917 /* need_libopcodes_p */ 1,
5920 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5921 shift, we don't check it here; we leave the checking to
5922 the libopcodes (operand_general_constraint_met_p). By
5923 doing this, we achieve better diagnostics. */
5924 if (skip_past_comma (&str)
5925 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5927 if (!info->shifter.operator_present
5928 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5930 /* Default to LSL if not present. Libopcodes prefers shifter
5931 kind to be explicit. */
5932 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5933 info->shifter.kind = AARCH64_MOD_LSL;
5937 case AARCH64_OPND_FPIMM:
5938 case AARCH64_OPND_SIMD_FPIMM:
5939 case AARCH64_OPND_SVE_FPIMM8:
5944 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5945 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5946 || !aarch64_imm_float_p (qfloat))
5949 set_fatal_syntax_error (_("invalid floating-point"
5953 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5954 inst.base.operands[i].imm.is_fp = 1;
5958 case AARCH64_OPND_SVE_I1_HALF_ONE:
5959 case AARCH64_OPND_SVE_I1_HALF_TWO:
5960 case AARCH64_OPND_SVE_I1_ZERO_ONE:
5965 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5966 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
5969 set_fatal_syntax_error (_("invalid floating-point"
5973 inst.base.operands[i].imm.value = qfloat;
5974 inst.base.operands[i].imm.is_fp = 1;
5978 case AARCH64_OPND_LIMM:
5979 po_misc_or_fail (parse_shifter_operand (&str, info,
5980 SHIFTED_LOGIC_IMM));
5981 if (info->shifter.operator_present)
5983 set_fatal_syntax_error
5984 (_("shift not allowed for bitmask immediate"));
5987 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5989 /* need_libopcodes_p */ 1,
5993 case AARCH64_OPND_AIMM:
5994 if (opcode->op == OP_ADD)
5995 /* ADD may have relocation types. */
5996 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5997 SHIFTED_ARITH_IMM));
5999 po_misc_or_fail (parse_shifter_operand (&str, info,
6000 SHIFTED_ARITH_IMM));
6001 switch (inst.reloc.type)
6003 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6004 info->shifter.amount = 12;
6006 case BFD_RELOC_UNUSED:
6007 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6008 if (info->shifter.kind != AARCH64_MOD_NONE)
6009 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6010 inst.reloc.pc_rel = 0;
6015 info->imm.value = 0;
6016 if (!info->shifter.operator_present)
6018 /* Default to LSL if not present. Libopcodes prefers shifter
6019 kind to be explicit. */
6020 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6021 info->shifter.kind = AARCH64_MOD_LSL;
6025 case AARCH64_OPND_HALF:
6027 /* #<imm16> or relocation. */
6028 int internal_fixup_p;
6029 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6030 if (internal_fixup_p)
6031 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6032 skip_whitespace (str);
6033 if (skip_past_comma (&str))
6035 /* {, LSL #<shift>} */
6036 if (! aarch64_gas_internal_fixup_p ())
6038 set_fatal_syntax_error (_("can't mix relocation modifier "
6039 "with explicit shift"));
6042 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6045 inst.base.operands[i].shifter.amount = 0;
6046 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6047 inst.base.operands[i].imm.value = 0;
6048 if (! process_movw_reloc_info ())
6053 case AARCH64_OPND_EXCEPTION:
6054 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6056 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6058 /* need_libopcodes_p */ 0,
6062 case AARCH64_OPND_NZCV:
6064 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
6068 info->imm.value = nzcv->value;
6071 po_imm_or_fail (0, 15);
6072 info->imm.value = val;
6076 case AARCH64_OPND_COND:
6077 case AARCH64_OPND_COND1:
6082 while (ISALPHA (*str));
6083 info->cond = hash_find_n (aarch64_cond_hsh, start, str - start);
6084 if (info->cond == NULL)
6086 set_syntax_error (_("invalid condition"));
6089 else if (operands[i] == AARCH64_OPND_COND1
6090 && (info->cond->value & 0xe) == 0xe)
6092 /* Do not allow AL or NV. */
6093 set_default_error ();
6099 case AARCH64_OPND_ADDR_ADRP:
6100 po_misc_or_fail (parse_adrp (&str));
6101 /* Clear the value as operand needs to be relocated. */
6102 info->imm.value = 0;
6105 case AARCH64_OPND_ADDR_PCREL14:
6106 case AARCH64_OPND_ADDR_PCREL19:
6107 case AARCH64_OPND_ADDR_PCREL21:
6108 case AARCH64_OPND_ADDR_PCREL26:
6109 po_misc_or_fail (parse_address (&str, info));
6110 if (!info->addr.pcrel)
6112 set_syntax_error (_("invalid pc-relative address"));
6115 if (inst.gen_lit_pool
6116 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6118 /* Only permit "=value" in the literal load instructions.
6119 The literal will be generated by programmer_friendly_fixup. */
6120 set_syntax_error (_("invalid use of \"=immediate\""));
6123 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6125 set_syntax_error (_("unrecognized relocation suffix"));
6128 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6130 info->imm.value = inst.reloc.exp.X_add_number;
6131 inst.reloc.type = BFD_RELOC_UNUSED;
6135 info->imm.value = 0;
6136 if (inst.reloc.type == BFD_RELOC_UNUSED)
6137 switch (opcode->iclass)
6141 /* e.g. CBZ or B.COND */
6142 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6143 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6147 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6148 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6152 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6154 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6155 : BFD_RELOC_AARCH64_JUMP26;
6158 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6159 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6162 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6163 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6169 inst.reloc.pc_rel = 1;
6173 case AARCH64_OPND_ADDR_SIMPLE:
6174 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6176 /* [<Xn|SP>{, #<simm>}] */
6178 /* First use the normal address-parsing routines, to get
6179 the usual syntax errors. */
6180 po_misc_or_fail (parse_address (&str, info));
6181 if (info->addr.pcrel || info->addr.offset.is_reg
6182 || !info->addr.preind || info->addr.postind
6183 || info->addr.writeback)
6185 set_syntax_error (_("invalid addressing mode"));
6189 /* Then retry, matching the specific syntax of these addresses. */
6191 po_char_or_fail ('[');
6192 po_reg_or_fail (REG_TYPE_R64_SP);
6193 /* Accept optional ", #0". */
6194 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6195 && skip_past_char (&str, ','))
6197 skip_past_char (&str, '#');
6198 if (! skip_past_char (&str, '0'))
6200 set_fatal_syntax_error
6201 (_("the optional immediate offset can only be 0"));
6205 po_char_or_fail (']');
6209 case AARCH64_OPND_ADDR_REGOFF:
6210 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6211 po_misc_or_fail (parse_address (&str, info));
6213 if (info->addr.pcrel || !info->addr.offset.is_reg
6214 || !info->addr.preind || info->addr.postind
6215 || info->addr.writeback)
6217 set_syntax_error (_("invalid addressing mode"));
6220 if (!info->shifter.operator_present)
6222 /* Default to LSL if not present. Libopcodes prefers shifter
6223 kind to be explicit. */
6224 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6225 info->shifter.kind = AARCH64_MOD_LSL;
6227 /* Qualifier to be deduced by libopcodes. */
6230 case AARCH64_OPND_ADDR_SIMM7:
6231 po_misc_or_fail (parse_address (&str, info));
6232 if (info->addr.pcrel || info->addr.offset.is_reg
6233 || (!info->addr.preind && !info->addr.postind))
6235 set_syntax_error (_("invalid addressing mode"));
6238 if (inst.reloc.type != BFD_RELOC_UNUSED)
6240 set_syntax_error (_("relocation not allowed"));
6243 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6245 /* need_libopcodes_p */ 1,
6249 case AARCH64_OPND_ADDR_SIMM9:
6250 case AARCH64_OPND_ADDR_SIMM9_2:
6251 case AARCH64_OPND_ADDR_SIMM11:
6252 case AARCH64_OPND_ADDR_SIMM13:
6253 po_misc_or_fail (parse_address (&str, info));
6254 if (info->addr.pcrel || info->addr.offset.is_reg
6255 || (!info->addr.preind && !info->addr.postind)
6256 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6257 && info->addr.writeback))
6259 set_syntax_error (_("invalid addressing mode"));
6262 if (inst.reloc.type != BFD_RELOC_UNUSED)
6264 set_syntax_error (_("relocation not allowed"));
6267 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6269 /* need_libopcodes_p */ 1,
6273 case AARCH64_OPND_ADDR_SIMM10:
6274 case AARCH64_OPND_ADDR_OFFSET:
6275 po_misc_or_fail (parse_address (&str, info));
6276 if (info->addr.pcrel || info->addr.offset.is_reg
6277 || !info->addr.preind || info->addr.postind)
6279 set_syntax_error (_("invalid addressing mode"));
6282 if (inst.reloc.type != BFD_RELOC_UNUSED)
6284 set_syntax_error (_("relocation not allowed"));
6287 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6289 /* need_libopcodes_p */ 1,
6293 case AARCH64_OPND_ADDR_UIMM12:
6294 po_misc_or_fail (parse_address (&str, info));
6295 if (info->addr.pcrel || info->addr.offset.is_reg
6296 || !info->addr.preind || info->addr.writeback)
6298 set_syntax_error (_("invalid addressing mode"));
6301 if (inst.reloc.type == BFD_RELOC_UNUSED)
6302 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6303 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6305 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6307 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6309 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6311 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6312 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6313 /* Leave qualifier to be determined by libopcodes. */
6316 case AARCH64_OPND_SIMD_ADDR_POST:
6317 /* [<Xn|SP>], <Xm|#<amount>> */
6318 po_misc_or_fail (parse_address (&str, info));
6319 if (!info->addr.postind || !info->addr.writeback)
6321 set_syntax_error (_("invalid addressing mode"));
6324 if (!info->addr.offset.is_reg)
6326 if (inst.reloc.exp.X_op == O_constant)
6327 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6330 set_fatal_syntax_error
6331 (_("writeback value must be an immediate constant"));
6338 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6339 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6340 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6341 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6342 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6343 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6344 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6345 case AARCH64_OPND_SVE_ADDR_RI_U6:
6346 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6347 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6348 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6349 /* [X<n>{, #imm, MUL VL}]
6351 but recognizing SVE registers. */
6352 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6353 &offset_qualifier));
6354 if (base_qualifier != AARCH64_OPND_QLF_X)
6356 set_syntax_error (_("invalid addressing mode"));
6360 if (info->addr.pcrel || info->addr.offset.is_reg
6361 || !info->addr.preind || info->addr.writeback)
6363 set_syntax_error (_("invalid addressing mode"));
6366 if (inst.reloc.type != BFD_RELOC_UNUSED
6367 || inst.reloc.exp.X_op != O_constant)
6369 /* Make sure this has priority over
6370 "invalid addressing mode". */
6371 set_fatal_syntax_error (_("constant offset required"));
6374 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6377 case AARCH64_OPND_SVE_ADDR_R:
6378 /* [<Xn|SP>{, <R><m>}]
6379 but recognizing SVE registers. */
6380 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6381 &offset_qualifier));
6382 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6384 offset_qualifier = AARCH64_OPND_QLF_X;
6385 info->addr.offset.is_reg = 1;
6386 info->addr.offset.regno = 31;
6388 else if (base_qualifier != AARCH64_OPND_QLF_X
6389 || offset_qualifier != AARCH64_OPND_QLF_X)
6391 set_syntax_error (_("invalid addressing mode"));
6396 case AARCH64_OPND_SVE_ADDR_RR:
6397 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6398 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6399 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6400 case AARCH64_OPND_SVE_ADDR_RX:
6401 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6402 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6403 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6404 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6405 but recognizing SVE registers. */
6406 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6407 &offset_qualifier));
6408 if (base_qualifier != AARCH64_OPND_QLF_X
6409 || offset_qualifier != AARCH64_OPND_QLF_X)
6411 set_syntax_error (_("invalid addressing mode"));
6416 case AARCH64_OPND_SVE_ADDR_RZ:
6417 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6418 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6419 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6420 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6421 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6422 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6423 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6424 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6425 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6426 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6427 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6428 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6429 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6430 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6431 &offset_qualifier));
6432 if (base_qualifier != AARCH64_OPND_QLF_X
6433 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6434 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6436 set_syntax_error (_("invalid addressing mode"));
6439 info->qualifier = offset_qualifier;
6442 case AARCH64_OPND_SVE_ADDR_ZX:
6443 /* [Zn.<T>{, <Xm>}]. */
6444 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6445 &offset_qualifier));
6447 base_qualifier either S_S or S_D
6448 offset_qualifier must be X
6450 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6451 && base_qualifier != AARCH64_OPND_QLF_S_D)
6452 || offset_qualifier != AARCH64_OPND_QLF_X)
6454 set_syntax_error (_("invalid addressing mode"));
6457 info->qualifier = base_qualifier;
6458 if (!info->addr.offset.is_reg || info->addr.pcrel
6459 || !info->addr.preind || info->addr.writeback
6460 || info->shifter.operator_present != 0)
6462 set_syntax_error (_("invalid addressing mode"));
6465 info->shifter.kind = AARCH64_MOD_LSL;
6469 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6470 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6471 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6472 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6473 /* [Z<n>.<T>{, #imm}] */
6474 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6475 &offset_qualifier));
6476 if (base_qualifier != AARCH64_OPND_QLF_S_S
6477 && base_qualifier != AARCH64_OPND_QLF_S_D)
6479 set_syntax_error (_("invalid addressing mode"));
6482 info->qualifier = base_qualifier;
6485 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6486 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6487 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6488 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6489 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6493 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6495 here since we get better error messages by leaving it to
6496 the qualifier checking routines. */
6497 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6498 &offset_qualifier));
6499 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6500 && base_qualifier != AARCH64_OPND_QLF_S_D)
6501 || offset_qualifier != base_qualifier)
6503 set_syntax_error (_("invalid addressing mode"));
6506 info->qualifier = base_qualifier;
6509 case AARCH64_OPND_SYSREG:
6511 uint32_t sysreg_flags;
6512 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6513 &sysreg_flags)) == PARSE_FAIL)
6515 set_syntax_error (_("unknown or missing system register name"));
6518 inst.base.operands[i].sysreg.value = val;
6519 inst.base.operands[i].sysreg.flags = sysreg_flags;
6523 case AARCH64_OPND_PSTATEFIELD:
6524 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6527 set_syntax_error (_("unknown or missing PSTATE field name"));
6530 inst.base.operands[i].pstatefield = val;
6533 case AARCH64_OPND_SYSREG_IC:
6534 inst.base.operands[i].sysins_op =
6535 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6538 case AARCH64_OPND_SYSREG_DC:
6539 inst.base.operands[i].sysins_op =
6540 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6543 case AARCH64_OPND_SYSREG_AT:
6544 inst.base.operands[i].sysins_op =
6545 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6548 case AARCH64_OPND_SYSREG_SR:
6549 inst.base.operands[i].sysins_op =
6550 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
6553 case AARCH64_OPND_SYSREG_TLBI:
6554 inst.base.operands[i].sysins_op =
6555 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6557 if (inst.base.operands[i].sysins_op == NULL)
6559 set_fatal_syntax_error ( _("unknown or missing operation name"));
6564 case AARCH64_OPND_BARRIER:
6565 case AARCH64_OPND_BARRIER_ISB:
6566 val = parse_barrier (&str);
6567 if (val != PARSE_FAIL
6568 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6570 /* ISB only accepts options name 'sy'. */
6572 (_("the specified option is not accepted in ISB"));
6573 /* Turn off backtrack as this optional operand is present. */
6577 /* This is an extension to accept a 0..15 immediate. */
6578 if (val == PARSE_FAIL)
6579 po_imm_or_fail (0, 15);
6580 info->barrier = aarch64_barrier_options + val;
6583 case AARCH64_OPND_PRFOP:
6584 val = parse_pldop (&str);
6585 /* This is an extension to accept a 0..31 immediate. */
6586 if (val == PARSE_FAIL)
6587 po_imm_or_fail (0, 31);
6588 inst.base.operands[i].prfop = aarch64_prfops + val;
6591 case AARCH64_OPND_BARRIER_PSB:
6592 val = parse_barrier_psb (&str, &(info->hint_option));
6593 if (val == PARSE_FAIL)
6597 case AARCH64_OPND_BTI_TARGET:
6598 val = parse_bti_operand (&str, &(info->hint_option));
6599 if (val == PARSE_FAIL)
6604 as_fatal (_("unhandled operand code %d"), operands[i]);
6607 /* If we get here, this operand was successfully parsed. */
6608 inst.base.operands[i].present = 1;
6612 /* The parse routine should already have set the error, but in case
6613 not, set a default one here. */
6615 set_default_error ();
6617 if (! backtrack_pos)
6618 goto parse_operands_return;
6621 /* We reach here because this operand is marked as optional, and
6622 either no operand was supplied or the operand was supplied but it
6623 was syntactically incorrect. In the latter case we report an
6624 error. In the former case we perform a few more checks before
6625 dropping through to the code to insert the default operand. */
6627 char *tmp = backtrack_pos;
6628 char endchar = END_OF_INSN;
6630 if (i != (aarch64_num_of_operands (opcode) - 1))
6632 skip_past_char (&tmp, ',');
6634 if (*tmp != endchar)
6635 /* The user has supplied an operand in the wrong format. */
6636 goto parse_operands_return;
6638 /* Make sure there is not a comma before the optional operand.
6639 For example the fifth operand of 'sys' is optional:
6641 sys #0,c0,c0,#0, <--- wrong
6642 sys #0,c0,c0,#0 <--- correct. */
6643 if (comma_skipped_p && i && endchar == END_OF_INSN)
6645 set_fatal_syntax_error
6646 (_("unexpected comma before the omitted optional operand"));
6647 goto parse_operands_return;
6651 /* Reaching here means we are dealing with an optional operand that is
6652 omitted from the assembly line. */
6653 gas_assert (optional_operand_p (opcode, i));
6655 process_omitted_operand (operands[i], opcode, i, info);
6657 /* Try again, skipping the optional operand at backtrack_pos. */
6658 str = backtrack_pos;
6661 /* Clear any error record after the omitted optional operand has been
6662 successfully handled. */
6666 /* Check if we have parsed all the operands. */
6667 if (*str != '\0' && ! error_p ())
6669 /* Set I to the index of the last present operand; this is
6670 for the purpose of diagnostics. */
6671 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6673 set_fatal_syntax_error
6674 (_("unexpected characters following instruction"));
6677 parse_operands_return:
6681 DEBUG_TRACE ("parsing FAIL: %s - %s",
6682 operand_mismatch_kind_names[get_error_kind ()],
6683 get_error_message ());
6684 /* Record the operand error properly; this is useful when there
6685 are multiple instruction templates for a mnemonic name, so that
6686 later on, we can select the error that most closely describes
6688 record_operand_error (opcode, i, get_error_kind (),
6689 get_error_message ());
6694 DEBUG_TRACE ("parsing SUCCESS");
6699 /* It does some fix-up to provide some programmer friendly feature while
6700 keeping the libopcodes happy, i.e. libopcodes only accepts
6701 the preferred architectural syntax.
6702 Return FALSE if there is any failure; otherwise return TRUE. */
6705 programmer_friendly_fixup (aarch64_instruction *instr)
6707 aarch64_inst *base = &instr->base;
6708 const aarch64_opcode *opcode = base->opcode;
6709 enum aarch64_op op = opcode->op;
6710 aarch64_opnd_info *operands = base->operands;
6712 DEBUG_TRACE ("enter");
6714 switch (opcode->iclass)
6717 /* TBNZ Xn|Wn, #uimm6, label
6718 Test and Branch Not Zero: conditionally jumps to label if bit number
6719 uimm6 in register Xn is not zero. The bit number implies the width of
6720 the register, which may be written and should be disassembled as Wn if
6721 uimm is less than 32. */
6722 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
6724 if (operands[1].imm.value >= 32)
6726 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
6730 operands[0].qualifier = AARCH64_OPND_QLF_X;
6734 /* LDR Wt, label | =value
6735 As a convenience assemblers will typically permit the notation
6736 "=value" in conjunction with the pc-relative literal load instructions
6737 to automatically place an immediate value or symbolic address in a
6738 nearby literal pool and generate a hidden label which references it.
6739 ISREG has been set to 0 in the case of =value. */
6740 if (instr->gen_lit_pool
6741 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
6743 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
6744 if (op == OP_LDRSW_LIT)
6746 if (instr->reloc.exp.X_op != O_constant
6747 && instr->reloc.exp.X_op != O_big
6748 && instr->reloc.exp.X_op != O_symbol)
6750 record_operand_error (opcode, 1,
6751 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
6752 _("constant expression expected"));
6755 if (! add_to_lit_pool (&instr->reloc.exp, size))
6757 record_operand_error (opcode, 1,
6758 AARCH64_OPDE_OTHER_ERROR,
6759 _("literal pool insertion failed"));
6767 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
6768 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
6769 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
6770 A programmer-friendly assembler should accept a destination Xd in
6771 place of Wd, however that is not the preferred form for disassembly.
6773 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
6774 && operands[1].qualifier == AARCH64_OPND_QLF_W
6775 && operands[0].qualifier == AARCH64_OPND_QLF_X)
6776 operands[0].qualifier = AARCH64_OPND_QLF_W;
6781 /* In the 64-bit form, the final register operand is written as Wm
6782 for all but the (possibly omitted) UXTX/LSL and SXTX
6784 As a programmer-friendly assembler, we accept e.g.
6785 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
6786 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
6787 int idx = aarch64_operand_index (opcode->operands,
6788 AARCH64_OPND_Rm_EXT);
6789 gas_assert (idx == 1 || idx == 2);
6790 if (operands[0].qualifier == AARCH64_OPND_QLF_X
6791 && operands[idx].qualifier == AARCH64_OPND_QLF_X
6792 && operands[idx].shifter.kind != AARCH64_MOD_LSL
6793 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
6794 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
6795 operands[idx].qualifier = AARCH64_OPND_QLF_W;
6803 DEBUG_TRACE ("exit with SUCCESS");
6807 /* Check for loads and stores that will cause unpredictable behavior. */
6810 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
6812 aarch64_inst *base = &instr->base;
6813 const aarch64_opcode *opcode = base->opcode;
6814 const aarch64_opnd_info *opnds = base->operands;
6815 switch (opcode->iclass)
6822 /* Loading/storing the base register is unpredictable if writeback. */
6823 if ((aarch64_get_operand_class (opnds[0].type)
6824 == AARCH64_OPND_CLASS_INT_REG)
6825 && opnds[0].reg.regno == opnds[1].addr.base_regno
6826 && opnds[1].addr.base_regno != REG_SP
6827 /* Exempt STG/STZG/ST2G/STZ2G. */
6828 && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
6829 && opnds[1].addr.writeback)
6830 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6834 case ldstnapair_offs:
6835 case ldstpair_indexed:
6836 /* Loading/storing the base register is unpredictable if writeback. */
6837 if ((aarch64_get_operand_class (opnds[0].type)
6838 == AARCH64_OPND_CLASS_INT_REG)
6839 && (opnds[0].reg.regno == opnds[2].addr.base_regno
6840 || opnds[1].reg.regno == opnds[2].addr.base_regno)
6841 && opnds[2].addr.base_regno != REG_SP
6843 && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
6844 && opnds[2].addr.writeback)
6845 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6846 /* Load operations must load different registers. */
6847 if ((opcode->opcode & (1 << 22))
6848 && opnds[0].reg.regno == opnds[1].reg.regno)
6849 as_warn (_("unpredictable load of register pair -- `%s'"), str);
6853 /* It is unpredictable if the destination and status registers are the
6855 if ((aarch64_get_operand_class (opnds[0].type)
6856 == AARCH64_OPND_CLASS_INT_REG)
6857 && (aarch64_get_operand_class (opnds[1].type)
6858 == AARCH64_OPND_CLASS_INT_REG)
6859 && (opnds[0].reg.regno == opnds[1].reg.regno
6860 || opnds[0].reg.regno == opnds[2].reg.regno))
6861 as_warn (_("unpredictable: identical transfer and status registers"
6873 force_automatic_sequence_close (void)
6875 if (now_instr_sequence.instr)
6877 as_warn (_("previous `%s' sequence has not been closed"),
6878 now_instr_sequence.instr->opcode->name);
6879 init_insn_sequence (NULL, &now_instr_sequence);
6883 /* A wrapper function to interface with libopcodes on encoding and
6884 record the error message if there is any.
6886 Return TRUE on success; otherwise return FALSE. */
6889 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6892 aarch64_operand_error error_info;
6893 memset (&error_info, '\0', sizeof (error_info));
6894 error_info.kind = AARCH64_OPDE_NIL;
6895 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
6896 && !error_info.non_fatal)
6899 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6900 record_operand_error_info (opcode, &error_info);
6901 return error_info.non_fatal;
6904 #ifdef DEBUG_AARCH64
6906 dump_opcode_operands (const aarch64_opcode *opcode)
6909 while (opcode->operands[i] != AARCH64_OPND_NIL)
6911 aarch64_verbose ("\t\t opnd%d: %s", i,
6912 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
6913 ? aarch64_get_operand_name (opcode->operands[i])
6914 : aarch64_get_operand_desc (opcode->operands[i]));
6918 #endif /* DEBUG_AARCH64 */
6920 /* This is the guts of the machine-dependent assembler. STR points to a
6921 machine dependent instruction. This function is supposed to emit
6922 the frags/bytes it assembles to. */
6925 md_assemble (char *str)
6928 templates *template;
6929 aarch64_opcode *opcode;
6930 aarch64_inst *inst_base;
6931 unsigned saved_cond;
6933 /* Align the previous label if needed. */
6934 if (last_label_seen != NULL)
6936 symbol_set_frag (last_label_seen, frag_now);
6937 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
6938 S_SET_SEGMENT (last_label_seen, now_seg);
6941 /* Update the current insn_sequence from the segment. */
6942 insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;
6944 inst.reloc.type = BFD_RELOC_UNUSED;
6946 DEBUG_TRACE ("\n\n");
6947 DEBUG_TRACE ("==============================");
6948 DEBUG_TRACE ("Enter md_assemble with %s", str);
6950 template = opcode_lookup (&p);
6953 /* It wasn't an instruction, but it might be a register alias of
6954 the form alias .req reg directive. */
6955 if (!create_register_alias (str, p))
6956 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
6961 skip_whitespace (p);
6964 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
6965 get_mnemonic_name (str), str);
6969 init_operand_error_report ();
6971 /* Sections are assumed to start aligned. In executable section, there is no
6972 MAP_DATA symbol pending. So we only align the address during
6973 MAP_DATA --> MAP_INSN transition.
6974 For other sections, this is not guaranteed. */
6975 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
6976 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
6977 frag_align_code (2, 0);
6979 saved_cond = inst.cond;
6980 reset_aarch64_instruction (&inst);
6981 inst.cond = saved_cond;
6983 /* Iterate through all opcode entries with the same mnemonic name. */
6986 opcode = template->opcode;
6988 DEBUG_TRACE ("opcode %s found", opcode->name);
6989 #ifdef DEBUG_AARCH64
6991 dump_opcode_operands (opcode);
6992 #endif /* DEBUG_AARCH64 */
6994 mapping_state (MAP_INSN);
6996 inst_base = &inst.base;
6997 inst_base->opcode = opcode;
6999 /* Truly conditionally executed instructions, e.g. b.cond. */
7000 if (opcode->flags & F_COND)
7002 gas_assert (inst.cond != COND_ALWAYS);
7003 inst_base->cond = get_cond_from_value (inst.cond);
7004 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
7006 else if (inst.cond != COND_ALWAYS)
7008 /* It shouldn't arrive here, where the assembly looks like a
7009 conditional instruction but the found opcode is unconditional. */
7014 if (parse_operands (p, opcode)
7015 && programmer_friendly_fixup (&inst)
7016 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
7018 /* Check that this instruction is supported for this CPU. */
7019 if (!opcode->avariant
7020 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
7022 as_bad (_("selected processor does not support `%s'"), str);
7026 warn_unpredictable_ldst (&inst, str);
7028 if (inst.reloc.type == BFD_RELOC_UNUSED
7029 || !inst.reloc.need_libopcodes_p)
7033 /* If there is relocation generated for the instruction,
7034 store the instruction information for the future fix-up. */
7035 struct aarch64_inst *copy;
7036 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
7037 copy = XNEW (struct aarch64_inst);
7038 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
7042 /* Issue non-fatal messages if any. */
7043 output_operand_error_report (str, TRUE);
7047 template = template->next;
7048 if (template != NULL)
7050 reset_aarch64_instruction (&inst);
7051 inst.cond = saved_cond;
7054 while (template != NULL);
7056 /* Issue the error messages if any. */
7057 output_operand_error_report (str, FALSE);
7060 /* Various frobbings of labels and their addresses. */
7063 aarch64_start_line_hook (void)
7065 last_label_seen = NULL;
7069 aarch64_frob_label (symbolS * sym)
7071 last_label_seen = sym;
7073 dwarf2_emit_label (sym);
7077 aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
7079 /* Check to see if we have a block to close. */
7080 force_automatic_sequence_close ();
7084 aarch64_data_in_code (void)
7086 if (!strncmp (input_line_pointer + 1, "data:", 5))
7088 *input_line_pointer = '/';
7089 input_line_pointer += 5;
7090 *input_line_pointer = 0;
7098 aarch64_canonicalize_symbol_name (char *name)
7102 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
7103 *(name + len - 5) = 0;
7108 /* Table of all register names defined by default. The user can
7109 define additional names with .req. Note that all register names
7110 should appear in both upper and lowercase variants. Some registers
7111 also have mixed-case names. */
7113 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
7114 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
7115 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
7116 #define REGSET16(p,t) \
7117 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
7118 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
7119 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
7120 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
7121 #define REGSET31(p,t) \
7123 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
7124 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
7125 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
7126 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
7127 #define REGSET(p,t) \
7128 REGSET31(p,t), REGNUM(p,31,t)
7130 /* These go into aarch64_reg_hsh hash-table. */
7131 static const reg_entry reg_names[] = {
7132 /* Integer registers. */
7133 REGSET31 (x, R_64), REGSET31 (X, R_64),
7134 REGSET31 (w, R_32), REGSET31 (W, R_32),
7136 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
7137 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
7138 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
7139 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
7140 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
7141 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
7143 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
7144 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
7146 /* Floating-point single precision registers. */
7147 REGSET (s, FP_S), REGSET (S, FP_S),
7149 /* Floating-point double precision registers. */
7150 REGSET (d, FP_D), REGSET (D, FP_D),
7152 /* Floating-point half precision registers. */
7153 REGSET (h, FP_H), REGSET (H, FP_H),
7155 /* Floating-point byte precision registers. */
7156 REGSET (b, FP_B), REGSET (B, FP_B),
7158 /* Floating-point quad precision registers. */
7159 REGSET (q, FP_Q), REGSET (Q, FP_Q),
7161 /* FP/SIMD registers. */
7162 REGSET (v, VN), REGSET (V, VN),
7164 /* SVE vector registers. */
7165 REGSET (z, ZN), REGSET (Z, ZN),
7167 /* SVE predicate registers. */
7168 REGSET16 (p, PN), REGSET16 (P, PN)
7186 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
7187 static const asm_nzcv nzcv_names[] = {
7188 {"nzcv", B (n, z, c, v)},
7189 {"nzcV", B (n, z, c, V)},
7190 {"nzCv", B (n, z, C, v)},
7191 {"nzCV", B (n, z, C, V)},
7192 {"nZcv", B (n, Z, c, v)},
7193 {"nZcV", B (n, Z, c, V)},
7194 {"nZCv", B (n, Z, C, v)},
7195 {"nZCV", B (n, Z, C, V)},
7196 {"Nzcv", B (N, z, c, v)},
7197 {"NzcV", B (N, z, c, V)},
7198 {"NzCv", B (N, z, C, v)},
7199 {"NzCV", B (N, z, C, V)},
7200 {"NZcv", B (N, Z, c, v)},
7201 {"NZcV", B (N, Z, c, V)},
7202 {"NZCv", B (N, Z, C, v)},
7203 {"NZCV", B (N, Z, C, V)}
7216 /* MD interface: bits in the object file. */
7218 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7219 for use in the a.out file, and stores them in the array pointed to by buf.
7220 This knows about the endian-ness of the target machine and does
7221 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7222 2 (short) and 4 (long) Floating numbers are put out as a series of
7223 LITTLENUMS (shorts, here at least). */
7226 md_number_to_chars (char *buf, valueT val, int n)
7228 if (target_big_endian)
7229 number_to_chars_bigendian (buf, val, n);
7231 number_to_chars_littleendian (buf, val, n);
7234 /* MD interface: Sections. */
7236 /* Estimate the size of a frag before relaxing. Assume everything fits in
7240 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
7246 /* Round up a section size to the appropriate boundary. */
7249 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
7254 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
7255 of an rs_align_code fragment.
7257 Here we fill the frag with the appropriate info for padding the
7258 output stream. The resulting frag will consist of a fixed (fr_fix)
7259 and of a repeating (fr_var) part.
7261 The fixed content is always emitted before the repeating content and
7262 these two parts are used as follows in constructing the output:
7263 - the fixed part will be used to align to a valid instruction word
7264 boundary, in case that we start at a misaligned address; as no
7265 executable instruction can live at the misaligned location, we
7266 simply fill with zeros;
7267 - the variable part will be used to cover the remaining padding and
7268 we fill using the AArch64 NOP instruction.
7270 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
7271 enough storage space for up to 3 bytes for padding the back to a valid
7272 instruction alignment and exactly 4 bytes to store the NOP pattern. */
7275 aarch64_handle_align (fragS * fragP)
7277 /* NOP = d503201f */
7278 /* AArch64 instructions are always little-endian. */
7279 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
7281 int bytes, fix, noop_size;
7284 if (fragP->fr_type != rs_align_code)
7287 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
7288 p = fragP->fr_literal + fragP->fr_fix;
7291 gas_assert (fragP->tc_frag_data.recorded);
7294 noop_size = sizeof (aarch64_noop);
7296 fix = bytes & (noop_size - 1);
7300 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
7304 fragP->fr_fix += fix;
7308 memcpy (p, aarch64_noop, noop_size);
7309 fragP->fr_var = noop_size;
7312 /* Perform target specific initialisation of a frag.
7313 Note - despite the name this initialisation is not done when the frag
7314 is created, but only when its type is assigned. A frag can be created
7315 and used a long time before its type is set, so beware of assuming that
7316 this initialisation is performed first. */
7320 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
7321 int max_chars ATTRIBUTE_UNUSED)
7325 #else /* OBJ_ELF is defined. */
7327 aarch64_init_frag (fragS * fragP, int max_chars)
7329 /* Record a mapping symbol for alignment frags. We will delete this
7330 later if the alignment ends up empty. */
7331 if (!fragP->tc_frag_data.recorded)
7332 fragP->tc_frag_data.recorded = 1;
7334 /* PR 21809: Do not set a mapping state for debug sections
7335 - it just confuses other tools. */
7336 if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
7339 switch (fragP->fr_type)
7343 mapping_state_2 (MAP_DATA, max_chars);
7346 /* PR 20364: We can get alignment frags in code sections,
7347 so do not just assume that we should use the MAP_DATA state. */
7348 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
7351 mapping_state_2 (MAP_INSN, max_chars);
7358 /* Initialize the DWARF-2 unwind information for this procedure. */
7361 tc_aarch64_frame_initial_instructions (void)
7363 cfi_add_CFA_def_cfa (REG_SP, 0);
7365 #endif /* OBJ_ELF */
7367 /* Convert REGNAME to a DWARF-2 register number. */
7370 tc_aarch64_regname_to_dw2regnum (char *regname)
7372 const reg_entry *reg = parse_reg (®name);
7378 case REG_TYPE_SP_32:
7379 case REG_TYPE_SP_64:
7389 return reg->number + 64;
7397 /* Implement DWARF2_ADDR_SIZE. */
7400 aarch64_dwarf2_addr_size (void)
7402 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7406 return bfd_arch_bits_per_address (stdoutput) / 8;
7409 /* MD interface: Symbol and relocation handling. */
7411 /* Return the address within the segment that a PC-relative fixup is
7412 relative to. For AArch64 PC-relative fixups applied to instructions
7413 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
7416 md_pcrel_from_section (fixS * fixP, segT seg)
7418 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
7420 /* If this is pc-relative and we are going to emit a relocation
7421 then we just want to put out any pipeline compensation that the linker
7422 will need. Otherwise we want to use the calculated base. */
7424 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
7425 || aarch64_force_relocation (fixP)))
7428 /* AArch64 should be consistent for all pc-relative relocations. */
7429 return base + AARCH64_PCREL_OFFSET;
7432 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
7433 Otherwise we have no need to default values of symbols. */
7436 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
7439 if (name[0] == '_' && name[1] == 'G'
7440 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
7444 if (symbol_find (name))
7445 as_bad (_("GOT already in the symbol table"));
7447 GOT_symbol = symbol_new (name, undefined_section,
7448 (valueT) 0, &zero_address_frag);
7458 /* Return non-zero if the indicated VALUE has overflowed the maximum
7459 range expressible by a unsigned number with the indicated number of
7463 unsigned_overflow (valueT value, unsigned bits)
7466 if (bits >= sizeof (valueT) * 8)
7468 lim = (valueT) 1 << bits;
7469 return (value >= lim);
7473 /* Return non-zero if the indicated VALUE has overflowed the maximum
7474 range expressible by an signed number with the indicated number of
7478 signed_overflow (offsetT value, unsigned bits)
7481 if (bits >= sizeof (offsetT) * 8)
7483 lim = (offsetT) 1 << (bits - 1);
7484 return (value < -lim || value >= lim);
7487 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7488 unsigned immediate offset load/store instruction, try to encode it as
7489 an unscaled, 9-bit, signed immediate offset load/store instruction.
7490 Return TRUE if it is successful; otherwise return FALSE.
7492 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
7493 in response to the standard LDR/STR mnemonics when the immediate offset is
7494 unambiguous, i.e. when it is negative or unaligned. */
7497 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
7500 enum aarch64_op new_op;
7501 const aarch64_opcode *new_opcode;
7503 gas_assert (instr->opcode->iclass == ldst_pos);
7505 switch (instr->opcode->op)
7507 case OP_LDRB_POS:new_op = OP_LDURB; break;
7508 case OP_STRB_POS: new_op = OP_STURB; break;
7509 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
7510 case OP_LDRH_POS: new_op = OP_LDURH; break;
7511 case OP_STRH_POS: new_op = OP_STURH; break;
7512 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
7513 case OP_LDR_POS: new_op = OP_LDUR; break;
7514 case OP_STR_POS: new_op = OP_STUR; break;
7515 case OP_LDRF_POS: new_op = OP_LDURV; break;
7516 case OP_STRF_POS: new_op = OP_STURV; break;
7517 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
7518 case OP_PRFM_POS: new_op = OP_PRFUM; break;
7519 default: new_op = OP_NIL; break;
7522 if (new_op == OP_NIL)
7525 new_opcode = aarch64_get_opcode (new_op);
7526 gas_assert (new_opcode != NULL);
7528 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
7529 instr->opcode->op, new_opcode->op);
7531 aarch64_replace_opcode (instr, new_opcode);
7533 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
7534 qualifier matching may fail because the out-of-date qualifier will
7535 prevent the operand being updated with a new and correct qualifier. */
7536 idx = aarch64_operand_index (instr->opcode->operands,
7537 AARCH64_OPND_ADDR_SIMM9);
7538 gas_assert (idx == 1);
7539 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
7541 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
7543 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
7550 /* Called by fix_insn to fix a MOV immediate alias instruction.
7552 Operand for a generic move immediate instruction, which is an alias
7553 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
7554 a 32-bit/64-bit immediate value into general register. An assembler error
7555 shall result if the immediate cannot be created by a single one of these
7556 instructions. If there is a choice, then to ensure reversability an
7557 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
7560 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
7562 const aarch64_opcode *opcode;
7564 /* Need to check if the destination is SP/ZR. The check has to be done
7565 before any aarch64_replace_opcode. */
7566 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
7567 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
7569 instr->operands[1].imm.value = value;
7570 instr->operands[1].skip = 0;
7574 /* Try the MOVZ alias. */
7575 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
7576 aarch64_replace_opcode (instr, opcode);
7577 if (aarch64_opcode_encode (instr->opcode, instr,
7578 &instr->value, NULL, NULL, insn_sequence))
7580 put_aarch64_insn (buf, instr->value);
7583 /* Try the MOVK alias. */
7584 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
7585 aarch64_replace_opcode (instr, opcode);
7586 if (aarch64_opcode_encode (instr->opcode, instr,
7587 &instr->value, NULL, NULL, insn_sequence))
7589 put_aarch64_insn (buf, instr->value);
7594 if (try_mov_bitmask_p)
7596 /* Try the ORR alias. */
7597 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
7598 aarch64_replace_opcode (instr, opcode);
7599 if (aarch64_opcode_encode (instr->opcode, instr,
7600 &instr->value, NULL, NULL, insn_sequence))
7602 put_aarch64_insn (buf, instr->value);
7607 as_bad_where (fixP->fx_file, fixP->fx_line,
7608 _("immediate cannot be moved by a single instruction"));
7611 /* An instruction operand which is immediate related may have symbol used
7612 in the assembly, e.g.
7615 .set u32, 0x00ffff00
7617 At the time when the assembly instruction is parsed, a referenced symbol,
7618 like 'u32' in the above example may not have been seen; a fixS is created
7619 in such a case and is handled here after symbols have been resolved.
7620 Instruction is fixed up with VALUE using the information in *FIXP plus
7621 extra information in FLAGS.
7623 This function is called by md_apply_fix to fix up instructions that need
7624 a fix-up described above but does not involve any linker-time relocation. */
7627 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
7631 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7632 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
7633 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
7637 /* Now the instruction is about to be fixed-up, so the operand that
7638 was previously marked as 'ignored' needs to be unmarked in order
7639 to get the encoding done properly. */
7640 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
7641 new_inst->operands[idx].skip = 0;
7644 gas_assert (opnd != AARCH64_OPND_NIL);
7648 case AARCH64_OPND_EXCEPTION:
7649 if (unsigned_overflow (value, 16))
7650 as_bad_where (fixP->fx_file, fixP->fx_line,
7651 _("immediate out of range"));
7652 insn = get_aarch64_insn (buf);
7653 insn |= encode_svc_imm (value);
7654 put_aarch64_insn (buf, insn);
7657 case AARCH64_OPND_AIMM:
7658 /* ADD or SUB with immediate.
7659 NOTE this assumes we come here with a add/sub shifted reg encoding
7660 3 322|2222|2 2 2 21111 111111
7661 1 098|7654|3 2 1 09876 543210 98765 43210
7662 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
7663 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
7664 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
7665 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
7667 3 322|2222|2 2 221111111111
7668 1 098|7654|3 2 109876543210 98765 43210
7669 11000000 sf 001|0001|shift imm12 Rn Rd ADD
7670 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
7671 51000000 sf 101|0001|shift imm12 Rn Rd SUB
7672 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
7673 Fields sf Rn Rd are already set. */
7674 insn = get_aarch64_insn (buf);
7678 insn = reencode_addsub_switch_add_sub (insn);
7682 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
7683 && unsigned_overflow (value, 12))
7685 /* Try to shift the value by 12 to make it fit. */
7686 if (((value >> 12) << 12) == value
7687 && ! unsigned_overflow (value, 12 + 12))
7690 insn |= encode_addsub_imm_shift_amount (1);
7694 if (unsigned_overflow (value, 12))
7695 as_bad_where (fixP->fx_file, fixP->fx_line,
7696 _("immediate out of range"));
7698 insn |= encode_addsub_imm (value);
7700 put_aarch64_insn (buf, insn);
7703 case AARCH64_OPND_SIMD_IMM:
7704 case AARCH64_OPND_SIMD_IMM_SFT:
7705 case AARCH64_OPND_LIMM:
7706 /* Bit mask immediate. */
7707 gas_assert (new_inst != NULL);
7708 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
7709 new_inst->operands[idx].imm.value = value;
7710 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
7711 &new_inst->value, NULL, NULL, insn_sequence))
7712 put_aarch64_insn (buf, new_inst->value);
7714 as_bad_where (fixP->fx_file, fixP->fx_line,
7715 _("invalid immediate"));
7718 case AARCH64_OPND_HALF:
7719 /* 16-bit unsigned immediate. */
7720 if (unsigned_overflow (value, 16))
7721 as_bad_where (fixP->fx_file, fixP->fx_line,
7722 _("immediate out of range"));
7723 insn = get_aarch64_insn (buf);
7724 insn |= encode_movw_imm (value & 0xffff);
7725 put_aarch64_insn (buf, insn);
7728 case AARCH64_OPND_IMM_MOV:
7729 /* Operand for a generic move immediate instruction, which is
7730 an alias instruction that generates a single MOVZ, MOVN or ORR
7731 instruction to loads a 32-bit/64-bit immediate value into general
7732 register. An assembler error shall result if the immediate cannot be
7733 created by a single one of these instructions. If there is a choice,
7734 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
7735 and MOVZ or MOVN to ORR. */
7736 gas_assert (new_inst != NULL);
7737 fix_mov_imm_insn (fixP, buf, new_inst, value);
7740 case AARCH64_OPND_ADDR_SIMM7:
7741 case AARCH64_OPND_ADDR_SIMM9:
7742 case AARCH64_OPND_ADDR_SIMM9_2:
7743 case AARCH64_OPND_ADDR_SIMM10:
7744 case AARCH64_OPND_ADDR_UIMM12:
7745 case AARCH64_OPND_ADDR_SIMM11:
7746 case AARCH64_OPND_ADDR_SIMM13:
7747 /* Immediate offset in an address. */
7748 insn = get_aarch64_insn (buf);
7750 gas_assert (new_inst != NULL && new_inst->value == insn);
7751 gas_assert (new_inst->opcode->operands[1] == opnd
7752 || new_inst->opcode->operands[2] == opnd);
7754 /* Get the index of the address operand. */
7755 if (new_inst->opcode->operands[1] == opnd)
7756 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
7759 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
7762 /* Update the resolved offset value. */
7763 new_inst->operands[idx].addr.offset.imm = value;
7765 /* Encode/fix-up. */
7766 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
7767 &new_inst->value, NULL, NULL, insn_sequence))
7769 put_aarch64_insn (buf, new_inst->value);
7772 else if (new_inst->opcode->iclass == ldst_pos
7773 && try_to_encode_as_unscaled_ldst (new_inst))
7775 put_aarch64_insn (buf, new_inst->value);
7779 as_bad_where (fixP->fx_file, fixP->fx_line,
7780 _("immediate offset out of range"));
7785 as_fatal (_("unhandled operand code %d"), opnd);
7789 /* Apply a fixup (fixP) to segment data, once it has been determined
7790 by our caller that we have all the info we need to fix it up.
7792 Parameter valP is the pointer to the value of the bits. */
7795 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
7797 offsetT value = *valP;
7799 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7801 unsigned flags = fixP->fx_addnumber;
7803 DEBUG_TRACE ("\n\n");
7804 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
7805 DEBUG_TRACE ("Enter md_apply_fix");
7807 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
7809 /* Note whether this will delete the relocation. */
7811 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
7814 /* Process the relocations. */
7815 switch (fixP->fx_r_type)
7817 case BFD_RELOC_NONE:
7818 /* This will need to go in the object file. */
7823 case BFD_RELOC_8_PCREL:
7824 if (fixP->fx_done || !seg->use_rela_p)
7825 md_number_to_chars (buf, value, 1);
7829 case BFD_RELOC_16_PCREL:
7830 if (fixP->fx_done || !seg->use_rela_p)
7831 md_number_to_chars (buf, value, 2);
7835 case BFD_RELOC_32_PCREL:
7836 if (fixP->fx_done || !seg->use_rela_p)
7837 md_number_to_chars (buf, value, 4);
7841 case BFD_RELOC_64_PCREL:
7842 if (fixP->fx_done || !seg->use_rela_p)
7843 md_number_to_chars (buf, value, 8);
7846 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7847 /* We claim that these fixups have been processed here, even if
7848 in fact we generate an error because we do not have a reloc
7849 for them, so tc_gen_reloc() will reject them. */
7851 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
7853 as_bad_where (fixP->fx_file, fixP->fx_line,
7854 _("undefined symbol %s used as an immediate value"),
7855 S_GET_NAME (fixP->fx_addsy));
7856 goto apply_fix_return;
7858 fix_insn (fixP, flags, value);
7861 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7862 if (fixP->fx_done || !seg->use_rela_p)
7865 as_bad_where (fixP->fx_file, fixP->fx_line,
7866 _("pc-relative load offset not word aligned"));
7867 if (signed_overflow (value, 21))
7868 as_bad_where (fixP->fx_file, fixP->fx_line,
7869 _("pc-relative load offset out of range"));
7870 insn = get_aarch64_insn (buf);
7871 insn |= encode_ld_lit_ofs_19 (value >> 2);
7872 put_aarch64_insn (buf, insn);
7876 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7877 if (fixP->fx_done || !seg->use_rela_p)
7879 if (signed_overflow (value, 21))
7880 as_bad_where (fixP->fx_file, fixP->fx_line,
7881 _("pc-relative address offset out of range"));
7882 insn = get_aarch64_insn (buf);
7883 insn |= encode_adr_imm (value);
7884 put_aarch64_insn (buf, insn);
7888 case BFD_RELOC_AARCH64_BRANCH19:
7889 if (fixP->fx_done || !seg->use_rela_p)
7892 as_bad_where (fixP->fx_file, fixP->fx_line,
7893 _("conditional branch target not word aligned"));
7894 if (signed_overflow (value, 21))
7895 as_bad_where (fixP->fx_file, fixP->fx_line,
7896 _("conditional branch out of range"));
7897 insn = get_aarch64_insn (buf);
7898 insn |= encode_cond_branch_ofs_19 (value >> 2);
7899 put_aarch64_insn (buf, insn);
7903 case BFD_RELOC_AARCH64_TSTBR14:
7904 if (fixP->fx_done || !seg->use_rela_p)
7907 as_bad_where (fixP->fx_file, fixP->fx_line,
7908 _("conditional branch target not word aligned"));
7909 if (signed_overflow (value, 16))
7910 as_bad_where (fixP->fx_file, fixP->fx_line,
7911 _("conditional branch out of range"));
7912 insn = get_aarch64_insn (buf);
7913 insn |= encode_tst_branch_ofs_14 (value >> 2);
7914 put_aarch64_insn (buf, insn);
7918 case BFD_RELOC_AARCH64_CALL26:
7919 case BFD_RELOC_AARCH64_JUMP26:
7920 if (fixP->fx_done || !seg->use_rela_p)
7923 as_bad_where (fixP->fx_file, fixP->fx_line,
7924 _("branch target not word aligned"));
7925 if (signed_overflow (value, 28))
7926 as_bad_where (fixP->fx_file, fixP->fx_line,
7927 _("branch out of range"));
7928 insn = get_aarch64_insn (buf);
7929 insn |= encode_branch_ofs_26 (value >> 2);
7930 put_aarch64_insn (buf, insn);
7934 case BFD_RELOC_AARCH64_MOVW_G0:
7935 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7936 case BFD_RELOC_AARCH64_MOVW_G0_S:
7937 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7938 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
7939 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
7942 case BFD_RELOC_AARCH64_MOVW_G1:
7943 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7944 case BFD_RELOC_AARCH64_MOVW_G1_S:
7945 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7946 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
7947 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
7950 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7952 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7953 /* Should always be exported to object file, see
7954 aarch64_force_relocation(). */
7955 gas_assert (!fixP->fx_done);
7956 gas_assert (seg->use_rela_p);
7958 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7960 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7961 /* Should always be exported to object file, see
7962 aarch64_force_relocation(). */
7963 gas_assert (!fixP->fx_done);
7964 gas_assert (seg->use_rela_p);
7966 case BFD_RELOC_AARCH64_MOVW_G2:
7967 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7968 case BFD_RELOC_AARCH64_MOVW_G2_S:
7969 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
7970 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
7973 case BFD_RELOC_AARCH64_MOVW_G3:
7974 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
7977 if (fixP->fx_done || !seg->use_rela_p)
7979 insn = get_aarch64_insn (buf);
7983 /* REL signed addend must fit in 16 bits */
7984 if (signed_overflow (value, 16))
7985 as_bad_where (fixP->fx_file, fixP->fx_line,
7986 _("offset out of range"));
7990 /* Check for overflow and scale. */
7991 switch (fixP->fx_r_type)
7993 case BFD_RELOC_AARCH64_MOVW_G0:
7994 case BFD_RELOC_AARCH64_MOVW_G1:
7995 case BFD_RELOC_AARCH64_MOVW_G2:
7996 case BFD_RELOC_AARCH64_MOVW_G3:
7997 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7998 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7999 if (unsigned_overflow (value, scale + 16))
8000 as_bad_where (fixP->fx_file, fixP->fx_line,
8001 _("unsigned value out of range"));
8003 case BFD_RELOC_AARCH64_MOVW_G0_S:
8004 case BFD_RELOC_AARCH64_MOVW_G1_S:
8005 case BFD_RELOC_AARCH64_MOVW_G2_S:
8006 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8007 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8008 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8009 /* NOTE: We can only come here with movz or movn. */
8010 if (signed_overflow (value, scale + 16))
8011 as_bad_where (fixP->fx_file, fixP->fx_line,
8012 _("signed value out of range"));
8015 /* Force use of MOVN. */
8017 insn = reencode_movzn_to_movn (insn);
8021 /* Force use of MOVZ. */
8022 insn = reencode_movzn_to_movz (insn);
8026 /* Unchecked relocations. */
8032 /* Insert value into MOVN/MOVZ/MOVK instruction. */
8033 insn |= encode_movw_imm (value & 0xffff);
8035 put_aarch64_insn (buf, insn);
8039 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
8040 fixP->fx_r_type = (ilp32_p
8041 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
8042 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
8043 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8044 /* Should always be exported to object file, see
8045 aarch64_force_relocation(). */
8046 gas_assert (!fixP->fx_done);
8047 gas_assert (seg->use_rela_p);
8050 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
8051 fixP->fx_r_type = (ilp32_p
8052 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
8053 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
8054 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8055 /* Should always be exported to object file, see
8056 aarch64_force_relocation(). */
8057 gas_assert (!fixP->fx_done);
8058 gas_assert (seg->use_rela_p);
8061 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8062 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8063 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8064 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8065 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8066 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8067 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8068 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8069 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8070 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8071 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8072 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8073 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8074 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8075 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8076 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8077 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8078 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
8079 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
8080 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
8081 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8082 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8083 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8084 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
8085 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
8086 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
8087 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
8088 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
8089 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
8090 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
8091 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
8092 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
8093 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
8094 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
8095 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
8096 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
8097 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
8098 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
8099 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
8100 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
8101 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
8102 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
8103 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
8104 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
8105 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
8106 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
8107 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
8108 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
8109 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
8110 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
8111 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
8112 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
8113 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8114 /* Should always be exported to object file, see
8115 aarch64_force_relocation(). */
8116 gas_assert (!fixP->fx_done);
8117 gas_assert (seg->use_rela_p);
8120 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
8121 /* Should always be exported to object file, see
8122 aarch64_force_relocation(). */
8123 fixP->fx_r_type = (ilp32_p
8124 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
8125 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
8126 gas_assert (!fixP->fx_done);
8127 gas_assert (seg->use_rela_p);
8130 case BFD_RELOC_AARCH64_ADD_LO12:
8131 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8132 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
8133 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
8134 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8135 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8136 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8137 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8138 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8139 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8140 case BFD_RELOC_AARCH64_LDST128_LO12:
8141 case BFD_RELOC_AARCH64_LDST16_LO12:
8142 case BFD_RELOC_AARCH64_LDST32_LO12:
8143 case BFD_RELOC_AARCH64_LDST64_LO12:
8144 case BFD_RELOC_AARCH64_LDST8_LO12:
8145 /* Should always be exported to object file, see
8146 aarch64_force_relocation(). */
8147 gas_assert (!fixP->fx_done);
8148 gas_assert (seg->use_rela_p);
8151 case BFD_RELOC_AARCH64_TLSDESC_ADD:
8152 case BFD_RELOC_AARCH64_TLSDESC_CALL:
8153 case BFD_RELOC_AARCH64_TLSDESC_LDR:
8156 case BFD_RELOC_UNUSED:
8157 /* An error will already have been reported. */
8161 as_bad_where (fixP->fx_file, fixP->fx_line,
8162 _("unexpected %s fixup"),
8163 bfd_get_reloc_code_name (fixP->fx_r_type));
8168 /* Free the allocated the struct aarch64_inst.
8169 N.B. currently there are very limited number of fix-up types actually use
8170 this field, so the impact on the performance should be minimal . */
8171 if (fixP->tc_fix_data.inst != NULL)
8172 free (fixP->tc_fix_data.inst);
8177 /* Translate internal representation of relocation info to BFD target
8181 tc_gen_reloc (asection * section, fixS * fixp)
8184 bfd_reloc_code_real_type code;
8186 reloc = XNEW (arelent);
8188 reloc->sym_ptr_ptr = XNEW (asymbol *);
8189 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8190 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8194 if (section->use_rela_p)
8195 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8197 fixp->fx_offset = reloc->address;
8199 reloc->addend = fixp->fx_offset;
8201 code = fixp->fx_r_type;
8206 code = BFD_RELOC_16_PCREL;
8211 code = BFD_RELOC_32_PCREL;
8216 code = BFD_RELOC_64_PCREL;
8223 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8224 if (reloc->howto == NULL)
8226 as_bad_where (fixp->fx_file, fixp->fx_line,
8228 ("cannot represent %s relocation in this object file format"),
8229 bfd_get_reloc_code_name (code));
8236 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8239 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8241 bfd_reloc_code_real_type type;
8245 FIXME: @@ Should look at CPU word size. */
8252 type = BFD_RELOC_16;
8255 type = BFD_RELOC_32;
8258 type = BFD_RELOC_64;
8261 as_bad (_("cannot do %u-byte relocation"), size);
8262 type = BFD_RELOC_UNUSED;
8266 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8270 aarch64_force_relocation (struct fix *fixp)
8272 switch (fixp->fx_r_type)
8274 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
8275 /* Perform these "immediate" internal relocations
8276 even if the symbol is extern or weak. */
8279 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
8280 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
8281 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
8282 /* Pseudo relocs that need to be fixed up according to
8286 case BFD_RELOC_AARCH64_ADD_LO12:
8287 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8288 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
8289 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
8290 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8291 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8292 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8293 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8294 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8295 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8296 case BFD_RELOC_AARCH64_LDST128_LO12:
8297 case BFD_RELOC_AARCH64_LDST16_LO12:
8298 case BFD_RELOC_AARCH64_LDST32_LO12:
8299 case BFD_RELOC_AARCH64_LDST64_LO12:
8300 case BFD_RELOC_AARCH64_LDST8_LO12:
8301 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8302 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8303 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8304 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8305 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8306 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8307 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
8308 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8309 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8310 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8311 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8312 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8313 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8314 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8315 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8316 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8317 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8318 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8319 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8320 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
8321 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
8322 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
8323 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8324 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8325 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8326 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
8327 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
8328 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
8329 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
8330 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
8331 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
8332 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
8333 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
8334 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
8335 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
8336 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
8337 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
8338 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
8339 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
8340 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
8341 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
8342 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
8343 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
8344 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
8345 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
8346 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
8347 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
8348 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
8349 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
8350 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
8351 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
8352 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
8353 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
8354 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
8355 /* Always leave these relocations for the linker. */
8362 return generic_force_reloc (fixp);
8367 /* Implement md_after_parse_args. This is the earliest time we need to decide
8368 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8371 aarch64_after_parse_args (void)
8373 if (aarch64_abi != AARCH64_ABI_NONE)
8376 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8377 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8378 aarch64_abi = AARCH64_ABI_ILP32;
8380 aarch64_abi = AARCH64_ABI_LP64;
8384 elf64_aarch64_target_format (void)
8387 /* FIXME: What to do for ilp32_p ? */
8388 if (target_big_endian)
8389 return "elf64-bigaarch64-cloudabi";
8391 return "elf64-littleaarch64-cloudabi";
8393 if (target_big_endian)
8394 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8396 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8401 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
8403 elf_frob_symbol (symp, puntp);
8407 /* MD interface: Finalization. */
8409 /* A good place to do this, although this was probably not intended
8410 for this kind of use. We need to dump the literal pool before
8411 references are made to a null symbol pointer. */
8414 aarch64_cleanup (void)
8418 for (pool = list_of_pools; pool; pool = pool->next)
8420 /* Put it at the end of the relevant section. */
8421 subseg_set (pool->section, pool->sub_section);
8427 /* Remove any excess mapping symbols generated for alignment frags in
8428 SEC. We may have created a mapping symbol before a zero byte
8429 alignment; remove it if there's a mapping symbol after the
8432 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
8433 void *dummy ATTRIBUTE_UNUSED)
8435 segment_info_type *seginfo = seg_info (sec);
8438 if (seginfo == NULL || seginfo->frchainP == NULL)
8441 for (fragp = seginfo->frchainP->frch_root;
8442 fragp != NULL; fragp = fragp->fr_next)
8444 symbolS *sym = fragp->tc_frag_data.last_map;
8445 fragS *next = fragp->fr_next;
8447 /* Variable-sized frags have been converted to fixed size by
8448 this point. But if this was variable-sized to start with,
8449 there will be a fixed-size frag after it. So don't handle
8451 if (sym == NULL || next == NULL)
8454 if (S_GET_VALUE (sym) < next->fr_address)
8455 /* Not at the end of this frag. */
8457 know (S_GET_VALUE (sym) == next->fr_address);
8461 if (next->tc_frag_data.first_map != NULL)
8463 /* Next frag starts with a mapping symbol. Discard this
8465 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
8469 if (next->fr_next == NULL)
8471 /* This mapping symbol is at the end of the section. Discard
8473 know (next->fr_fix == 0 && next->fr_var == 0);
8474 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
8478 /* As long as we have empty frags without any mapping symbols,
8480 /* If the next frag is non-empty and does not start with a
8481 mapping symbol, then this mapping symbol is required. */
8482 if (next->fr_address != next->fr_next->fr_address)
8485 next = next->fr_next;
8487 while (next != NULL);
8492 /* Adjust the symbol table. */
8495 aarch64_adjust_symtab (void)
8498 /* Remove any overlapping mapping symbols generated by alignment frags. */
8499 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
8500 /* Now do generic ELF adjustments. */
8501 elf_adjust_symtab ();
8506 checked_hash_insert (struct hash_control *table, const char *key, void *value)
8508 const char *hash_err;
8510 hash_err = hash_insert (table, key, value);
8512 printf ("Internal Error: Can't hash %s\n", key);
8516 fill_instruction_hash_table (void)
8518 aarch64_opcode *opcode = aarch64_opcode_table;
8520 while (opcode->name != NULL)
8522 templates *templ, *new_templ;
8523 templ = hash_find (aarch64_ops_hsh, opcode->name);
8525 new_templ = XNEW (templates);
8526 new_templ->opcode = opcode;
8527 new_templ->next = NULL;
8530 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8533 new_templ->next = templ->next;
8534 templ->next = new_templ;
8541 convert_to_upper (char *dst, const char *src, size_t num)
8544 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
8545 *dst = TOUPPER (*src);
8549 /* Assume STR point to a lower-case string, allocate, convert and return
8550 the corresponding upper-case string. */
8551 static inline const char*
8552 get_upper_str (const char *str)
8555 size_t len = strlen (str);
8556 ret = XNEWVEC (char, len + 1);
8557 convert_to_upper (ret, str, len);
8561 /* MD interface: Initialization. */
8569 if ((aarch64_ops_hsh = hash_new ()) == NULL
8570 || (aarch64_cond_hsh = hash_new ()) == NULL
8571 || (aarch64_shift_hsh = hash_new ()) == NULL
8572 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
8573 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
8574 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
8575 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
8576 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
8577 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
8578 || (aarch64_sys_regs_sr_hsh = hash_new ()) == NULL
8579 || (aarch64_reg_hsh = hash_new ()) == NULL
8580 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
8581 || (aarch64_nzcv_hsh = hash_new ()) == NULL
8582 || (aarch64_pldop_hsh = hash_new ()) == NULL
8583 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
8584 as_fatal (_("virtual memory exhausted"));
8586 fill_instruction_hash_table ();
8588 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8589 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8590 (void *) (aarch64_sys_regs + i));
8592 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8593 checked_hash_insert (aarch64_pstatefield_hsh,
8594 aarch64_pstatefields[i].name,
8595 (void *) (aarch64_pstatefields + i));
8597 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8598 checked_hash_insert (aarch64_sys_regs_ic_hsh,
8599 aarch64_sys_regs_ic[i].name,
8600 (void *) (aarch64_sys_regs_ic + i));
8602 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8603 checked_hash_insert (aarch64_sys_regs_dc_hsh,
8604 aarch64_sys_regs_dc[i].name,
8605 (void *) (aarch64_sys_regs_dc + i));
8607 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8608 checked_hash_insert (aarch64_sys_regs_at_hsh,
8609 aarch64_sys_regs_at[i].name,
8610 (void *) (aarch64_sys_regs_at + i));
8612 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8613 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
8614 aarch64_sys_regs_tlbi[i].name,
8615 (void *) (aarch64_sys_regs_tlbi + i));
8617 for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
8618 checked_hash_insert (aarch64_sys_regs_sr_hsh,
8619 aarch64_sys_regs_sr[i].name,
8620 (void *) (aarch64_sys_regs_sr + i));
8622 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8623 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8624 (void *) (reg_names + i));
8626 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8627 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8628 (void *) (nzcv_names + i));
8630 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8632 const char *name = aarch64_operand_modifiers[i].name;
8633 checked_hash_insert (aarch64_shift_hsh, name,
8634 (void *) (aarch64_operand_modifiers + i));
8635 /* Also hash the name in the upper case. */
8636 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8637 (void *) (aarch64_operand_modifiers + i));
8640 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8643 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8644 the same condition code. */
8645 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8647 const char *name = aarch64_conds[i].names[j];
8650 checked_hash_insert (aarch64_cond_hsh, name,
8651 (void *) (aarch64_conds + i));
8652 /* Also hash the name in the upper case. */
8653 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8654 (void *) (aarch64_conds + i));
8658 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8660 const char *name = aarch64_barrier_options[i].name;
8661 /* Skip xx00 - the unallocated values of option. */
8664 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8665 (void *) (aarch64_barrier_options + i));
8666 /* Also hash the name in the upper case. */
8667 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8668 (void *) (aarch64_barrier_options + i));
8671 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8673 const char* name = aarch64_prfops[i].name;
8674 /* Skip the unallocated hint encodings. */
8677 checked_hash_insert (aarch64_pldop_hsh, name,
8678 (void *) (aarch64_prfops + i));
8679 /* Also hash the name in the upper case. */
8680 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8681 (void *) (aarch64_prfops + i));
8684 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8686 const char* name = aarch64_hint_options[i].name;
8688 checked_hash_insert (aarch64_hint_opt_hsh, name,
8689 (void *) (aarch64_hint_options + i));
8690 /* Also hash the name in the upper case. */
8691 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8692 (void *) (aarch64_hint_options + i));
8695 /* Set the cpu variant based on the command-line options. */
8697 mcpu_cpu_opt = march_cpu_opt;
8700 mcpu_cpu_opt = &cpu_default;
8702 cpu_variant = *mcpu_cpu_opt;
8704 /* Record the CPU type. */
8705 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8707 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8710 /* Command line processing. */
8712 const char *md_shortopts = "m:";
8714 #ifdef AARCH64_BI_ENDIAN
8715 #define OPTION_EB (OPTION_MD_BASE + 0)
8716 #define OPTION_EL (OPTION_MD_BASE + 1)
8718 #if TARGET_BYTES_BIG_ENDIAN
8719 #define OPTION_EB (OPTION_MD_BASE + 0)
8721 #define OPTION_EL (OPTION_MD_BASE + 1)
8725 struct option md_longopts[] = {
8727 {"EB", no_argument, NULL, OPTION_EB},
8730 {"EL", no_argument, NULL, OPTION_EL},
8732 {NULL, no_argument, NULL, 0}
8735 size_t md_longopts_size = sizeof (md_longopts);
8737 struct aarch64_option_table
8739 const char *option; /* Option name to match. */
8740 const char *help; /* Help information. */
8741 int *var; /* Variable to change. */
8742 int value; /* What to change it to. */
8743 char *deprecated; /* If non-null, print this message. */
8746 static struct aarch64_option_table aarch64_opts[] = {
8747 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
8748 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
8750 #ifdef DEBUG_AARCH64
8751 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
8752 #endif /* DEBUG_AARCH64 */
8753 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
8755 {"mno-verbose-error", N_("do not output verbose error messages"),
8756 &verbose_error_p, 0, NULL},
8757 {NULL, NULL, NULL, 0, NULL}
8760 struct aarch64_cpu_option_table
8763 const aarch64_feature_set value;
8764 /* The canonical name of the CPU, or NULL to use NAME converted to upper
8766 const char *canonical_name;
8769 /* This list should, at a minimum, contain all the cpu names
8770 recognized by GCC. */
8771 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
8772 {"all", AARCH64_ANY, NULL},
8773 {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
8774 AARCH64_FEATURE_CRC), "Cortex-A35"},
8775 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
8776 AARCH64_FEATURE_CRC), "Cortex-A53"},
8777 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
8778 AARCH64_FEATURE_CRC), "Cortex-A57"},
8779 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
8780 AARCH64_FEATURE_CRC), "Cortex-A72"},
8781 {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
8782 AARCH64_FEATURE_CRC), "Cortex-A73"},
8783 {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8784 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
8786 {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8787 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
8789 {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8790 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
8792 {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8793 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
8794 | AARCH64_FEATURE_DOTPROD
8795 | AARCH64_FEATURE_PROFILE),
8797 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
8798 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
8799 "Samsung Exynos M1"},
8800 {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
8801 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
8802 | AARCH64_FEATURE_RDMA),
8804 {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8805 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
8806 | AARCH64_FEATURE_DOTPROD
8807 | AARCH64_FEATURE_SSBS),
8809 {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8810 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
8811 | AARCH64_FEATURE_DOTPROD
8812 | AARCH64_FEATURE_PROFILE),
8814 {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
8815 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
8816 | AARCH64_FEATURE_RDMA),
8817 "Qualcomm QDF24XX"},
8818 {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
8819 AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
8820 "Qualcomm Saphira"},
8821 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
8822 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
8824 {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
8825 AARCH64_FEATURE_CRYPTO),
8827 /* The 'xgene-1' name is an older name for 'xgene1', which was used
8828 in earlier releases and is superseded by 'xgene1' in all
8830 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
8831 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
8832 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
8833 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
8834 {"generic", AARCH64_ARCH_V8, NULL},
8836 {NULL, AARCH64_ARCH_NONE, NULL}
8839 struct aarch64_arch_option_table
8842 const aarch64_feature_set value;
8845 /* This list should, at a minimum, contain all the architecture names
8846 recognized by GCC. */
8847 static const struct aarch64_arch_option_table aarch64_archs[] = {
8848 {"all", AARCH64_ANY},
8849 {"armv8-a", AARCH64_ARCH_V8},
8850 {"armv8.1-a", AARCH64_ARCH_V8_1},
8851 {"armv8.2-a", AARCH64_ARCH_V8_2},
8852 {"armv8.3-a", AARCH64_ARCH_V8_3},
8853 {"armv8.4-a", AARCH64_ARCH_V8_4},
8854 {"armv8.5-a", AARCH64_ARCH_V8_5},
8855 {NULL, AARCH64_ARCH_NONE}
8858 /* ISA extensions. */
8859 struct aarch64_option_cpu_value_table
8862 const aarch64_feature_set value;
8863 const aarch64_feature_set require; /* Feature dependencies. */
8866 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
8867 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
8869 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO
8870 | AARCH64_FEATURE_AES
8871 | AARCH64_FEATURE_SHA2, 0),
8872 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
8873 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
8875 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
8877 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
8878 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
8879 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
8881 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
8883 {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
8885 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
8886 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
8887 {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
8888 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
8889 {"fp16fml", AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
8890 AARCH64_FEATURE (AARCH64_FEATURE_FP
8891 | AARCH64_FEATURE_F16, 0)},
8892 {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
8894 {"sve", AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
8895 AARCH64_FEATURE (AARCH64_FEATURE_F16
8896 | AARCH64_FEATURE_SIMD
8897 | AARCH64_FEATURE_COMPNUM, 0)},
8898 {"tme", AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
8900 {"compnum", AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
8901 AARCH64_FEATURE (AARCH64_FEATURE_F16
8902 | AARCH64_FEATURE_SIMD, 0)},
8903 {"rcpc", AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
8905 {"dotprod", AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
8907 {"sha2", AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
8909 {"sb", AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
8911 {"predres", AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
8913 {"aes", AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
8915 {"sm4", AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
8917 {"sha3", AARCH64_FEATURE (AARCH64_FEATURE_SHA2
8918 | AARCH64_FEATURE_SHA3, 0),
8920 {"rng", AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
8922 {"ssbs", AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
8924 {"memtag", AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
8926 {"sve2", AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
8927 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
8928 {"sve2-sm4", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
8929 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
8930 | AARCH64_FEATURE_SM4, 0)},
8931 {"sve2-aes", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
8932 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
8933 | AARCH64_FEATURE_AES, 0)},
8934 {"sve2-sha3", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
8935 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
8936 | AARCH64_FEATURE_SHA3, 0)},
8937 {"bitperm", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
8938 AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
8939 {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
8942 struct aarch64_long_option_table
8944 const char *option; /* Substring to match. */
8945 const char *help; /* Help information. */
8946 int (*func) (const char *subopt); /* Function to decode sub-option. */
8947 char *deprecated; /* If non-null, print this message. */
8950 /* Transitive closure of features depending on set. */
8951 static aarch64_feature_set
8952 aarch64_feature_disable_set (aarch64_feature_set set)
8954 const struct aarch64_option_cpu_value_table *opt;
8955 aarch64_feature_set prev = 0;
8957 while (prev != set) {
8959 for (opt = aarch64_features; opt->name != NULL; opt++)
8960 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
8961 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
8966 /* Transitive closure of dependencies of set. */
8967 static aarch64_feature_set
8968 aarch64_feature_enable_set (aarch64_feature_set set)
8970 const struct aarch64_option_cpu_value_table *opt;
8971 aarch64_feature_set prev = 0;
8973 while (prev != set) {
8975 for (opt = aarch64_features; opt->name != NULL; opt++)
8976 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
8977 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
8983 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
8984 bfd_boolean ext_only)
8986 /* We insist on extensions being added before being removed. We achieve
8987 this by using the ADDING_VALUE variable to indicate whether we are
8988 adding an extension (1) or removing it (0) and only allowing it to
8989 change in the order -1 -> 1 -> 0. */
8990 int adding_value = -1;
8991 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
8993 /* Copy the feature set, so that we can modify it. */
8997 while (str != NULL && *str != 0)
8999 const struct aarch64_option_cpu_value_table *opt;
9000 const char *ext = NULL;
9007 as_bad (_("invalid architectural extension"));
9011 ext = strchr (++str, '+');
9017 optlen = strlen (str);
9019 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
9021 if (adding_value != 0)
9026 else if (optlen > 0)
9028 if (adding_value == -1)
9030 else if (adding_value != 1)
9032 as_bad (_("must specify extensions to add before specifying "
9033 "those to remove"));
9040 as_bad (_("missing architectural extension"));
9044 gas_assert (adding_value != -1);
9046 for (opt = aarch64_features; opt->name != NULL; opt++)
9047 if (strncmp (opt->name, str, optlen) == 0)
9049 aarch64_feature_set set;
9051 /* Add or remove the extension. */
9054 set = aarch64_feature_enable_set (opt->value);
9055 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
9059 set = aarch64_feature_disable_set (opt->value);
9060 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
9065 if (opt->name == NULL)
9067 as_bad (_("unknown architectural extension `%s'"), str);
9078 aarch64_parse_cpu (const char *str)
9080 const struct aarch64_cpu_option_table *opt;
9081 const char *ext = strchr (str, '+');
9087 optlen = strlen (str);
9091 as_bad (_("missing cpu name `%s'"), str);
9095 for (opt = aarch64_cpus; opt->name != NULL; opt++)
9096 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9098 mcpu_cpu_opt = &opt->value;
9100 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
9105 as_bad (_("unknown cpu `%s'"), str);
9110 aarch64_parse_arch (const char *str)
9112 const struct aarch64_arch_option_table *opt;
9113 const char *ext = strchr (str, '+');
9119 optlen = strlen (str);
9123 as_bad (_("missing architecture name `%s'"), str);
9127 for (opt = aarch64_archs; opt->name != NULL; opt++)
9128 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9130 march_cpu_opt = &opt->value;
9132 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
9137 as_bad (_("unknown architecture `%s'\n"), str);
9142 struct aarch64_option_abi_value_table
9145 enum aarch64_abi_type value;
9148 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
9149 {"ilp32", AARCH64_ABI_ILP32},
9150 {"lp64", AARCH64_ABI_LP64},
9154 aarch64_parse_abi (const char *str)
9160 as_bad (_("missing abi name `%s'"), str);
9164 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9165 if (strcmp (str, aarch64_abis[i].name) == 0)
9167 aarch64_abi = aarch64_abis[i].value;
9171 as_bad (_("unknown abi `%s'\n"), str);
9175 static struct aarch64_long_option_table aarch64_long_opts[] = {
9177 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
9178 aarch64_parse_abi, NULL},
9179 #endif /* OBJ_ELF */
9180 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
9181 aarch64_parse_cpu, NULL},
9182 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
9183 aarch64_parse_arch, NULL},
9184 {NULL, NULL, 0, NULL}
9188 md_parse_option (int c, const char *arg)
9190 struct aarch64_option_table *opt;
9191 struct aarch64_long_option_table *lopt;
9197 target_big_endian = 1;
9203 target_big_endian = 0;
9208 /* Listing option. Just ignore these, we don't support additional
9213 for (opt = aarch64_opts; opt->option != NULL; opt++)
9215 if (c == opt->option[0]
9216 && ((arg == NULL && opt->option[1] == 0)
9217 || streq (arg, opt->option + 1)))
9219 /* If the option is deprecated, tell the user. */
9220 if (opt->deprecated != NULL)
9221 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
9222 arg ? arg : "", _(opt->deprecated));
9224 if (opt->var != NULL)
9225 *opt->var = opt->value;
9231 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9233 /* These options are expected to have an argument. */
9234 if (c == lopt->option[0]
9236 && strncmp (arg, lopt->option + 1,
9237 strlen (lopt->option + 1)) == 0)
9239 /* If the option is deprecated, tell the user. */
9240 if (lopt->deprecated != NULL)
9241 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
9242 _(lopt->deprecated));
9244 /* Call the sup-option parser. */
9245 return lopt->func (arg + strlen (lopt->option) - 1);
9256 md_show_usage (FILE * fp)
9258 struct aarch64_option_table *opt;
9259 struct aarch64_long_option_table *lopt;
9261 fprintf (fp, _(" AArch64-specific assembler options:\n"));
9263 for (opt = aarch64_opts; opt->option != NULL; opt++)
9264 if (opt->help != NULL)
9265 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
9267 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9268 if (lopt->help != NULL)
9269 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
9273 -EB assemble code for a big-endian cpu\n"));
9278 -EL assemble code for a little-endian cpu\n"));
9282 /* Parse a .cpu directive. */
9285 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
9287 const struct aarch64_cpu_option_table *opt;
9293 name = input_line_pointer;
9294 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9295 input_line_pointer++;
9296 saved_char = *input_line_pointer;
9297 *input_line_pointer = 0;
9299 ext = strchr (name, '+');
9302 optlen = ext - name;
9304 optlen = strlen (name);
9306 /* Skip the first "all" entry. */
9307 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
9308 if (strlen (opt->name) == optlen
9309 && strncmp (name, opt->name, optlen) == 0)
9311 mcpu_cpu_opt = &opt->value;
9313 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9316 cpu_variant = *mcpu_cpu_opt;
9318 *input_line_pointer = saved_char;
9319 demand_empty_rest_of_line ();
9322 as_bad (_("unknown cpu `%s'"), name);
9323 *input_line_pointer = saved_char;
9324 ignore_rest_of_line ();
9328 /* Parse a .arch directive. */
9331 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9333 const struct aarch64_arch_option_table *opt;
9339 name = input_line_pointer;
9340 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9341 input_line_pointer++;
9342 saved_char = *input_line_pointer;
9343 *input_line_pointer = 0;
9345 ext = strchr (name, '+');
9348 optlen = ext - name;
9350 optlen = strlen (name);
9352 /* Skip the first "all" entry. */
9353 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9354 if (strlen (opt->name) == optlen
9355 && strncmp (name, opt->name, optlen) == 0)
9357 mcpu_cpu_opt = &opt->value;
9359 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9362 cpu_variant = *mcpu_cpu_opt;
9364 *input_line_pointer = saved_char;
9365 demand_empty_rest_of_line ();
9369 as_bad (_("unknown architecture `%s'\n"), name);
9370 *input_line_pointer = saved_char;
9371 ignore_rest_of_line ();
9374 /* Parse a .arch_extension directive. */
9377 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9380 char *ext = input_line_pointer;;
9382 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9383 input_line_pointer++;
9384 saved_char = *input_line_pointer;
9385 *input_line_pointer = 0;
9387 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
9390 cpu_variant = *mcpu_cpu_opt;
9392 *input_line_pointer = saved_char;
9393 demand_empty_rest_of_line ();
9396 /* Copy symbol information. */
9399 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
9401 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);