1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
3 Copyright (C) 2009-2018 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GAS.
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
25 #include "bfd_stdint.h"
27 #include "safe-ctype.h"
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
36 #include "dwarf2dbg.h"
38 /* Types of processor to assemble for. */
40 #define CPU_DEFAULT AARCH64_ARCH_V8
43 #define streq(a, b) (strcmp (a, b) == 0)
45 #define END_OF_INSN '\0'
47 static aarch64_feature_set cpu_variant;
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
65 /* Which ABI to use. */
74 #define DEFAULT_ARCH "aarch64"
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
102 /* Bits for DEFINED field in vector_type_el. */
103 #define NTA_HASTYPE 1
104 #define NTA_HASINDEX 2
105 #define NTA_HASVARWIDTH 4
107 struct vector_type_el
109 enum vector_el_type type;
110 unsigned char defined;
115 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
119 bfd_reloc_code_real_type type;
122 enum aarch64_opnd opnd;
124 unsigned need_libopcodes_p : 1;
127 struct aarch64_instruction
129 /* libopcodes structure for instruction intermediate representation. */
131 /* Record assembly errors found during the parsing. */
134 enum aarch64_operand_error_kind kind;
137 /* The condition that appears in the assembly line. */
139 /* Relocation information (including the GAS internal fixup). */
141 /* Need to generate an immediate in the literal pool. */
142 unsigned gen_lit_pool : 1;
145 typedef struct aarch64_instruction aarch64_instruction;
147 static aarch64_instruction inst;
149 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
150 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
153 # define now_instr_sequence seg_info \
154 (now_seg)->tc_segment_info_data.insn_sequence
156 static struct aarch64_instr_sequence now_instr_sequence;
159 /* Diagnostics inline function utilities.
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s), in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
/* Lightweight error-recording helpers operating on the file-global
   `inst.parsing_error' (see the header comment above: the *first* error
   recorded for an assembly line is usually the most meaningful).
   NOTE(review): this extraction embeds the original file's line numbers
   and omits several structural lines (names, braces); the code below is
   kept byte-identical.  */
/* clear_error: reset the recorded error to "none".  */
179 inst.parsing_error.kind = AARCH64_OPDE_NIL;
180 inst.parsing_error.error = NULL;
/* error_p: return TRUE iff an error is currently recorded.  */
183 static inline bfd_boolean
186 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
/* Accessor for the recorded error message; may be NULL.  */
189 static inline const char *
190 get_error_message (void)
192 return inst.parsing_error.error;
/* Accessor for the recorded error kind.  */
195 static inline enum aarch64_operand_error_kind
196 get_error_kind (void)
198 return inst.parsing_error.kind;
/* Record an error of KIND with message ERROR.  ERROR is not copied, so
   it must outlive processing of the current assembly line.  */
202 set_error (enum aarch64_operand_error_kind kind, const char *error)
204 inst.parsing_error.kind = kind;
205 inst.parsing_error.error = error;
/* Record an AARCH64_OPDE_RECOVERABLE-kind error.  */
209 set_recoverable_error (const char *error)
211 set_error (AARCH64_OPDE_RECOVERABLE, error);
214 /* Use the DESC field of the corresponding aarch64_operand entry to compose
215 the error message. */
217 set_default_error (void)
219 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
/* Record a syntax error with an explicit message.  */
223 set_syntax_error (const char *error)
225 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
/* NOTE(review): in the full source this is presumably guarded so that
   only the first syntax error is kept; the guard line is not visible
   in this extraction -- confirm against upstream.  */
229 set_first_syntax_error (const char *error)
232 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
/* A fatal syntax error stops further operand-template matching.  */
236 set_fatal_syntax_error (const char *error)
238 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
241 /* Number of littlenums required to hold an extended precision number. */
242 #define MAX_LITTLENUMS 6
244 /* Return value for certain parsers when the parsing fails; those parsers
245 return the information of the parsed result, e.g. register number, on
247 #define PARSE_FAIL -1
249 /* This is an invalid condition code that means no conditional field is
251 #define COND_ALWAYS 0x10
255 const char *template;
261 const char *template;
268 bfd_reloc_code_real_type reloc;
271 /* Macros to define the register types and masks for the purpose
274 #undef AARCH64_REG_TYPES
275 #define AARCH64_REG_TYPES \
276 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
277 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
278 BASIC_REG_TYPE(SP_32) /* wsp */ \
279 BASIC_REG_TYPE(SP_64) /* sp */ \
280 BASIC_REG_TYPE(Z_32) /* wzr */ \
281 BASIC_REG_TYPE(Z_64) /* xzr */ \
282 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
283 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
284 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
285 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
286 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
287 BASIC_REG_TYPE(VN) /* v[0-31] */ \
288 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
289 BASIC_REG_TYPE(PN) /* p[0-15] */ \
290 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
291 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
292 /* Typecheck: same, plus SVE registers. */ \
293 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
295 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
296 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
298 /* Typecheck: same, plus SVE registers. */ \
299 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
300 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
302 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
303 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
304 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
305 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
306 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
307 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
308 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
309 /* Typecheck: any [BHSDQ]P FP. */ \
310 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
311 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
312 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
313 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
314 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
315 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
316 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
317 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
318 be used for SVE instructions, since Zn and Pn are valid symbols \
319 in other contexts. */ \
320 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
321 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
322 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
323 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
324 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
325 | REG_TYPE(ZN) | REG_TYPE(PN)) \
326 /* Any integer register; used for error messages only. */ \
327 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
328 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
329 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
330 /* Pseudo type to mark the end of the enumerator sequence. */ \
333 #undef BASIC_REG_TYPE
334 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
335 #undef MULTI_REG_TYPE
336 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
338 /* Register type enumerators. */
339 typedef enum aarch64_reg_type_
341 /* A list of REG_TYPE_*. */
345 #undef BASIC_REG_TYPE
346 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
348 #define REG_TYPE(T) (1 << REG_TYPE_##T)
349 #undef MULTI_REG_TYPE
350 #define MULTI_REG_TYPE(T,V) V,
352 /* Structure for a hash table entry for a register. */
356 unsigned char number;
357 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
358 unsigned char builtin;
361 /* Values indexed by aarch64_reg_type to assist the type checking. */
362 static const unsigned reg_type_masks[] =
367 #undef BASIC_REG_TYPE
369 #undef MULTI_REG_TYPE
370 #undef AARCH64_REG_TYPES
372 /* Diagnostics used when we don't get a register of the expected type.
373 Note: this has to synchronized with aarch64_reg_type definitions
/* Map an aarch64_reg_type to a human-readable "expected" diagnostic.
   NOTE(review): many `case' labels and `break's are missing from this
   extraction; remaining lines kept byte-identical.  Messages are N_()
   wrapped for later gettext translation.  */
376 get_reg_expected_msg (aarch64_reg_type reg_type)
383 msg = N_("integer 32-bit register expected");
386 msg = N_("integer 64-bit register expected");
389 msg = N_("integer register expected");
391 case REG_TYPE_R64_SP:
392 msg = N_("64-bit integer or SP register expected");
394 case REG_TYPE_SVE_BASE:
395 msg = N_("base register expected");
398 msg = N_("integer or zero register expected");
400 case REG_TYPE_SVE_OFFSET:
401 msg = N_("offset register expected");
404 msg = N_("integer or SP register expected");
406 case REG_TYPE_R_Z_SP:
407 msg = N_("integer, zero or SP register expected");
/* FP/SIMD scalar widths B/H/S/D/Q.  */
410 msg = N_("8-bit SIMD scalar register expected");
413 msg = N_("16-bit SIMD scalar or floating-point half precision "
414 "register expected");
417 msg = N_("32-bit SIMD scalar or floating-point single precision "
418 "register expected");
421 msg = N_("64-bit SIMD scalar or floating-point double precision "
422 "register expected");
425 msg = N_("128-bit SIMD scalar or floating-point quad precision "
426 "register expected");
428 case REG_TYPE_R_Z_BHSDQ_V:
429 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
430 msg = N_("register expected");
432 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
433 msg = N_("SIMD scalar or floating-point register expected");
435 case REG_TYPE_VN: /* any V reg */
436 msg = N_("vector register expected");
439 msg = N_("SVE vector register expected");
442 msg = N_("SVE predicate register expected");
/* Unknown enumerator: internal error, abort assembly.  */
445 as_fatal (_("invalid register type %d"), reg_type);
450 /* Some well known registers that we refer to directly elsewhere. */
453 /* Instructions take 4 bytes in the object file. */
456 static struct hash_control *aarch64_ops_hsh;
457 static struct hash_control *aarch64_cond_hsh;
458 static struct hash_control *aarch64_shift_hsh;
459 static struct hash_control *aarch64_sys_regs_hsh;
460 static struct hash_control *aarch64_pstatefield_hsh;
461 static struct hash_control *aarch64_sys_regs_ic_hsh;
462 static struct hash_control *aarch64_sys_regs_dc_hsh;
463 static struct hash_control *aarch64_sys_regs_at_hsh;
464 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
465 static struct hash_control *aarch64_reg_hsh;
466 static struct hash_control *aarch64_barrier_opt_hsh;
467 static struct hash_control *aarch64_nzcv_hsh;
468 static struct hash_control *aarch64_pldop_hsh;
469 static struct hash_control *aarch64_hint_opt_hsh;
471 /* Stuff needed to resolve the label ambiguity
480 static symbolS *last_label_seen;
482 /* Literal pool structure. Held on a per-section
483 and per-sub-section basis. */
485 #define MAX_LITERAL_POOL_SIZE 1024
486 typedef struct literal_expression
489 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
490 LITTLENUM_TYPE * bignum;
491 } literal_expression;
493 typedef struct literal_pool
495 literal_expression literals[MAX_LITERAL_POOL_SIZE];
496 unsigned int next_free_entry;
502 struct literal_pool *next;
505 /* Pointer to a linked list of literal pools. */
506 static literal_pool *list_of_pools = NULL;
510 /* This array holds the chars that always start a comment. If the
511 pre-processor is disabled, these aren't very useful. */
512 const char comment_chars[] = "";
514 /* This array holds the chars that only start a comment at the beginning of
515 a line. If the line seems to have the form '# 123 filename'
516 .line and .file directives will appear in the pre-processed output. */
517 /* Note that input_file.c hand checks for '#' at the beginning of the
518 first line of the input file. This is because the compiler outputs
519 #NO_APP at the beginning of its output. */
520 /* Also note that comments like this one will always work. */
521 const char line_comment_chars[] = "#";
523 const char line_separator_chars[] = ";";
525 /* Chars that can be used to separate mant
526 from exp in floating point numbers. */
527 const char EXP_CHARS[] = "eE";
529 /* Chars that mean this number is a floating point constant. */
533 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
535 /* Prefix character that indicates the start of an immediate value. */
536 #define is_immediate_prefix(C) ((C) == '#')
538 /* Separator character handling. */
540 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
542 static inline bfd_boolean
543 skip_past_char (char **str, char c)
554 #define skip_past_comma(str) skip_past_char (str, ',')
556 /* Arithmetic expressions (possibly involving symbols). */
558 static bfd_boolean in_my_get_expression_p = FALSE;
560 /* Third argument to my_get_expression. */
561 #define GE_NO_PREFIX 0
562 #define GE_OPT_PREFIX 1
564 /* Return TRUE if the string pointed by *STR is successfully parsed
565 as an valid expression; *EP will be filled with the information of
566 such an expression. Otherwise return FALSE. */
/* Parse the text at *STR as an expression into *EP using GAS's generic
   expression () parser (see the header comment above for the contract).
   PREFIX_MODE controls handling of a leading '#' immediate prefix.
   NOTE(review): several lines (locals, braces, returns) are missing
   from this extraction; the code below is kept byte-identical.  */
569 my_get_expression (expressionS * ep, char **str, int prefix_mode,
574 int prefix_present_p = 0;
/* Optionally consume a leading '#' immediate prefix and remember it so
   that errors after a '#' can be made fatal.  */
581 if (is_immediate_prefix (**str))
584 prefix_present_p = 1;
591 memset (ep, 0, sizeof (expressionS));
/* Temporarily point the global input_line_pointer at our string while
   expression () runs; in_my_get_expression_p lets md_operand () report
   bad expressions back to us via O_illegal.  */
593 save_in = input_line_pointer;
594 input_line_pointer = *str;
595 in_my_get_expression_p = TRUE;
596 seg = expression (ep);
597 in_my_get_expression_p = FALSE;
599 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
601 /* We found a bad expression in md_operand(). */
602 *str = input_line_pointer;
603 input_line_pointer = save_in;
/* A '#' prefix promised an immediate, so a bad expression there is
   fatal; otherwise only record it if it is the first error.  */
604 if (prefix_present_p && ! error_p ())
605 set_fatal_syntax_error (_("bad expression"));
607 set_first_syntax_error (_("bad expression"));
/* Only a handful of well-known segments are acceptable here.  */
612 if (seg != absolute_section
613 && seg != text_section
614 && seg != data_section
615 && seg != bss_section && seg != undefined_section)
617 set_syntax_error (_("bad segment"));
618 *str = input_line_pointer;
619 input_line_pointer = save_in;
/* Success: advance *STR past the parsed expression and restore the
   saved input_line_pointer.  */
626 *str = input_line_pointer;
627 input_line_pointer = save_in;
631 /* Turn a string in input_line_pointer into a floating point constant
632 of type TYPE, and store the appropriate bytes in *LITP. The number
633 of LITTLENUMS emitted is stored in *SIZEP. An error message is
634 returned, or NULL on OK. */
637 md_atof (int type, char *litP, int *sizeP)
639 return ieee_md_atof (type, litP, sizeP, target_big_endian);
642 /* We handle all bad expressions here, so that we can report the faulty
643 instruction in the error message. */
645 md_operand (expressionS * exp)
647 if (in_my_get_expression_p)
648 exp->X_op = O_illegal;
651 /* Immediate values. */
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is set
   will be the most meaningful.  Avoid overwriting it with later (cascading)
   errors by calling this function.  */

static void
first_error (const char *error)
{
  /* Keep only the first diagnostic for this assembly line: record
     ERROR only when nothing has been recorded yet.  (The guard had
     been lost in extraction; restored to match the contract stated in
     the comment above.)  */
  if (! error_p ())
    set_syntax_error (error);
}
665 /* Similar to first_error, but this function accepts formatted error
/* NOTE(review): the `size' constant, the va_list declaration and the
   function's braces are not visible in this extraction; code kept
   byte-identical.  */
668 first_error_fmt (const char *format, ...)
673 /* N.B. this single buffer will not cause error messages for different
674 instructions to pollute each other; this is because at the end of
675 processing of each assembly line, error message if any will be
676 collected by as_bad. */
677 static char buffer[size];
681 int ret ATTRIBUTE_UNUSED;
682 va_start (args, format);
/* Format into the persistent buffer; `know' asserts the result was
   neither truncated nor an encoding error.  */
683 ret = vsnprintf (buffer, size, format, args);
684 know (ret <= size - 1 && ret >= 0);
/* NOTE(review): a matching va_end (args) is not visible here --
   confirm it exists in the full source.  */
686 set_syntax_error (buffer);
690 /* Register parsing. */
692 /* Generic register parser which is called by other specialized
694 CCP points to what should be the beginning of a register name.
695 If it is indeed a valid register name, advance CCP over it and
696 return the reg_entry structure; otherwise return NULL.
697 It does not issue diagnostics. */
700 parse_reg (char **ccp)
/* Some targets mandate a fixed prefix character before register
   names; reject the name if it is absent.  */
706 #ifdef REGISTER_PREFIX
707 if (*start != REGISTER_PREFIX)
/* A register name must begin like an identifier...  */
713 if (!ISALPHA (*p) || !is_name_beginner (*p))
/* ...and continue with letters, digits or underscores.  */
718 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
/* Look the scanned name up in the global register hash table;
   hash_find_n avoids copying the (unterminated) name.  */
720 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
729 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
732 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
734 return (reg_type_masks[type] & (1 << reg->type)) != 0;
737 /* Try to parse a base or offset register. Allow SVE base and offset
738 registers if REG_TYPE includes SVE registers. Return the register
739 entry on success, setting *QUALIFIER to the register qualifier.
740 Return null otherwise.
742 Note that this function does not issue any diagnostics. */
744 static const reg_entry *
745 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
746 aarch64_opnd_qualifier_t *qualifier)
749 const reg_entry *reg = parse_reg (&str);
/* NOTE(review): the switch over reg->type is largely missing from
   this extraction; remaining lines kept byte-identical.  */
/* 32-bit GPR forms (Wn / WSP / WZR) take the W qualifier.  */
759 *qualifier = AARCH64_OPND_QLF_W;
/* 64-bit GPR forms (Xn / SP / XZR) take the X qualifier.  */
765 *qualifier = AARCH64_OPND_QLF_X;
/* SVE Z registers are only acceptable when REG_TYPE includes them.  */
769 if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
/* Element-size suffix on a Z register: .s or .d.  */
772 switch (TOLOWER (str[1]))
775 *qualifier = AARCH64_OPND_QLF_S_S;
778 *qualifier = AARCH64_OPND_QLF_S_D;
795 /* Try to parse a base or offset register. Return the register entry
796 on success, setting *QUALIFIER to the register qualifier. Return null
799 Note that this function does not issue any diagnostics. */
801 static const reg_entry *
802 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
804 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
807 /* Parse the qualifier of a vector register or vector element of type
808 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
809 succeeds; otherwise return FALSE.
811 Accept only one occurrence of:
812 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
/* NOTE(review): locals, braces and the element-size switch cases are
   partly missing from this extraction; code kept byte-identical.  */
815 parse_vector_type_for_operand (aarch64_reg_type reg_type,
816 struct vector_type_el *parsed_type, char **str)
820 unsigned element_size;
821 enum vector_el_type type;
/* Caller guarantees we are positioned on the '.' of e.g. "v0.4s".  */
824 gas_assert (*ptr == '.');
/* SVE Z/P registers take a bare element suffix with no lane count.  */
827 if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
/* Lane count: only 1, 2, 4, 8 or 16 are meaningful.  */
832 width = strtoul (ptr, &ptr, 10);
833 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
835 first_error_fmt (_("bad size %d in vector width specifier"), width);
/* Element-size letter (b/h/s/d/q), case-insensitive.  */
840 switch (TOLOWER (*ptr))
859 if (reg_type == REG_TYPE_ZN || width == 1)
868 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
870 first_error (_("missing element size"));
/* A typed vector must total 64 or 128 bits, except the special
   2h (32-bit) and 4b (32-bit) arrangements.  */
873 if (width != 0 && width * element_size != 64
874 && width * element_size != 128
875 && !(width == 2 && element_size == 16)
876 && !(width == 4 && element_size == 8))
879 ("invalid element size %d and vector size combination %c"),
885 parsed_type->type = type;
886 parsed_type->width = width;
893 /* *STR contains an SVE zero/merge predication suffix. Parse it into
894 *PARSED_TYPE and point *STR at the end of the suffix. */
897 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
/* Caller guarantees we are on the '/' of "Pn/z" or "Pn/m".  */
902 gas_assert (*ptr == '/')
904 switch (TOLOWER (*ptr))
/* '/z': zeroing predication.  */
907 parsed_type->type = NT_zero;
/* '/m': merging predication.  */
910 parsed_type->type = NT_merge;
/* Anything else before ',' / end-of-operand is a malformed suffix.  */
913 if (*ptr != '\0' && *ptr != ',')
914 first_error_fmt (_("unexpected character `%c' in predication type"),
917 first_error (_("missing predication type"));
/* Predication suffixes carry no lane-count information.  */
920 parsed_type->width = 0;
925 /* Parse a register of the type TYPE.
927 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
928 name or the parsed register is not of TYPE.
930 Otherwise return the register number, and optionally fill in the actual
931 type of the register in *RTYPE when multiple alternatives were given, and
932 return the register shape and element index information in *TYPEINFO.
934 IN_REG_LIST should be set with TRUE if the caller is parsing a register
/* NOTE(review): many structural lines (braces, returns, gotos) are
   missing from this extraction; code kept byte-identical.  */
938 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
939 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
942 const reg_entry *reg = parse_reg (&str);
943 struct vector_type_el atype;
944 struct vector_type_el parsetype;
945 bfd_boolean is_typed_vecreg = FALSE;
/* Start from an "untyped, unindexed" description.  */
948 atype.type = NT_invtype;
956 set_default_error ();
/* The name parsed, but it is not a register of the wanted TYPE.  */
960 if (! aarch64_check_reg_type (reg, type))
962 DEBUG_TRACE ("reg type check failed");
963 set_default_error ();
/* Vn/Zn take a '.' type suffix; Pn may also take a '/' predication
   suffix.  */
968 if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
969 && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
973 if (!parse_vector_type_for_operand (type, &parsetype, &str))
978 if (!parse_predication_for_operand (&parsetype, &str))
982 /* Register if of the form Vn.[bhsdq]. */
983 is_typed_vecreg = TRUE;
985 if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
987 /* The width is always variable; we don't allow an integer width
989 gas_assert (parsetype.width == 0);
990 atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
992 else if (parsetype.width == 0)
993 /* Expect index. In the new scheme we cannot have
994 Vn.[bhsdq] represent a scalar. Therefore any
995 Vn.[bhsdq] should have an index following it.
996 Except in reglists of course. */
997 atype.defined |= NTA_HASINDEX;
999 atype.defined |= NTA_HASTYPE;
1001 atype.type = parsetype.type;
1002 atype.width = parsetype.width;
/* Optional element index "[imm]".  */
1005 if (skip_past_char (&str, '['))
1009 /* Reject Sn[index] syntax. */
1010 if (!is_typed_vecreg)
1012 first_error (_("this type of register can't be indexed"));
1018 first_error (_("index not allowed inside register list"));
1022 atype.defined |= NTA_HASINDEX;
/* The index must be a bare constant expression.  */
1024 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1026 if (exp.X_op != O_constant)
1028 first_error (_("constant expression required"));
1032 if (! skip_past_char (&str, ']'))
1035 atype.index = exp.X_add_number;
1037 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
1039 /* Indexed vector register expected. */
1040 first_error (_("indexed vector register expected"));
1044 /* A vector reg Vn should be typed or indexed. */
1046 if (type == REG_TYPE_VN && atype.defined == 0)
1047 first_error (_("invalid use of vector register"));
1063 Return the register number on success; return PARSE_FAIL otherwise.
1065 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1066 the register (e.g. NEON double or quad reg when either has been requested).
1068 If this is a NEON vector register with additional type information, fill
1069 in the struct pointed to by VECTYPE (if non-NULL).
1071 This parser does not handle register list. */
/* Thin wrapper over parse_typed_reg for the single-register case.  */
1074 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1075 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1077 struct vector_type_el atype;
1079 int reg = parse_typed_reg (&str, type, rtype, &atype,
1080 /*in_reg_list= */ FALSE);
1082 if (reg == PARSE_FAIL)
1093 static inline bfd_boolean
1094 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1098 && e1.defined == e2.defined
1099 && e1.width == e2.width && e1.index == e2.index;
1102 /* This function parses a list of vector registers of type TYPE.
1103 On success, it returns the parsed register list information in the
1104 following encoded format:
1106 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1107 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1109 The information of the register shape and/or index is returned in
1112 It returns PARSE_FAIL if the register list is invalid.
1114 The list contains one to four registers.
1115 Each register can be one of:
1118 All <T> should be identical.
1119 All <index> should be identical.
1120 There are restrictions on <Vt> numbers which are checked later
1121 (by reg_list_valid_p). */
/* NOTE(review): the main do/while loop's opening and several locals
   are missing from this extraction; code kept byte-identical.  */
1124 parse_vector_reg_list (char **ccp, aarch64_reg_type type,
1125 struct vector_type_el *vectype)
1129 struct vector_type_el typeinfo, typeinfo_first;
1134 bfd_boolean error = FALSE;
1135 bfd_boolean expect_index = FALSE;
/* A register list must be wrapped in '{' ... '}'.  */
1139 set_syntax_error (_("expecting {"));
/* Remember the first register's type so later entries can be checked
   for consistency against it.  */
1145 typeinfo_first.defined = 0;
1146 typeinfo_first.type = NT_invtype;
1147 typeinfo_first.width = -1;
1148 typeinfo_first.index = 0;
/* "Vn - Vm" range form.  */
1157 str++; /* skip over '-' */
1160 val = parse_typed_reg (&str, type, NULL, &typeinfo,
1161 /*in_reg_list= */ TRUE);
1162 if (val == PARSE_FAIL)
1164 set_first_syntax_error (_("invalid vector register in list"));
1168 /* reject [bhsd]n */
1169 if (type == REG_TYPE_VN && typeinfo.defined == 0)
1171 set_first_syntax_error (_("invalid scalar register in list"));
/* If the first entry had an index, every entry must.  */
1176 if (typeinfo.defined & NTA_HASINDEX)
1177 expect_index = TRUE;
/* Ranges must be ascending.  */
1181 if (val < val_range)
1183 set_first_syntax_error
1184 (_("invalid range in vector register list"));
1193 typeinfo_first = typeinfo;
1194 else if (! eq_vector_type_el (typeinfo_first, typeinfo))
1196 set_first_syntax_error
1197 (_("type mismatch in vector register list"));
/* Pack each register number into 5 bits of the result.  */
1202 for (i = val_range; i <= val; i++)
1204 ret_val |= i << (5 * nb_regs);
1209 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1211 skip_whitespace (str);
1214 set_first_syntax_error (_("end of vector register list not found"));
1219 skip_whitespace (str);
/* Optional shared index "[imm]" after the closing '}'.  */
1223 if (skip_past_char (&str, '['))
1227 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1228 if (exp.X_op != O_constant)
1230 set_first_syntax_error (_("constant expression required."));
1233 if (! skip_past_char (&str, ']'))
1236 typeinfo_first.index = exp.X_add_number;
1240 set_first_syntax_error (_("expected index"));
/* The architecture allows at most four registers per list.  */
1247 set_first_syntax_error (_("too many registers in vector register list"));
1250 else if (nb_regs == 0)
1252 set_first_syntax_error (_("empty vector register list"));
1258 *vectype = typeinfo_first;
/* Low 2 bits encode (count - 1); 5 bits per register above that.  */
1260 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1263 /* Directives: register aliases. */
/* Insert an alias STR for register NUMBER of TYPE into the register
   hash table.  NOTE(review): the return statements and braces are
   missing from this extraction; code kept byte-identical.  */
1266 insert_reg_alias (char *str, int number, aarch64_reg_type type)
/* An existing entry under this name?  */
1271 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
/* Never allow a built-in register name to be shadowed.  */
1274 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1277 /* Only warn about a redefinition if it's not defined as the
1279 else if (new->number != number || new->type != type)
1280 as_warn (_("ignoring redefinition of register alias '%s'"), str);
/* Create the new alias entry; the name is duplicated because the
   hash table keeps a pointer to it.  */
1285 name = xstrdup (str);
1286 new = XNEW (reg_entry);
1289 new->number = number;
1291 new->builtin = FALSE;
1293 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1299 /* Look for the .req directive. This is of the form:
1301 new_register_name .req existing_register_name
1303 If we find one, or if it looks sufficiently like one that we want to
1304 handle any error here, return TRUE. Otherwise return FALSE. */
/* NOTE(review): several locals, braces and returns are missing from
   this extraction; code kept byte-identical.  */
1307 create_register_alias (char *newname, char *p)
1309 const reg_entry *old;
1310 char *oldname, *nbuf;
1313 /* The input scrubber ensures that whitespace after the mnemonic is
1314 collapsed to single spaces. */
/* Not a ".req" line at all: let the caller handle it.  */
1316 if (strncmp (oldname, " .req ", 6) != 0)
1320 if (*oldname == '\0')
/* The right-hand side must name an existing register/alias.  */
1323 old = hash_find (aarch64_reg_hsh, oldname);
1326 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1330 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1331 the desired alias name, and p points to its end. If not, then
1332 the desired alias name is in the global original_case_string. */
1333 #ifdef TC_CASE_SENSITIVE
1336 newname = original_case_string;
1337 nlen = strlen (newname);
1340 nbuf = xmemdup0 (newname, nlen);
1342 /* Create aliases under the new name as stated; an all-lowercase
1343 version of the new name; and an all-uppercase version of the new
/* First, the alias exactly as written.  */
1345 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
/* Then the all-uppercase variant, if it differs.  */
1347 for (p = nbuf; *p; p++)
1350 if (strncmp (nbuf, newname, nlen))
1352 /* If this attempt to create an additional alias fails, do not bother
1353 trying to create the all-lower case alias. We will fail and issue
1354 a second, duplicate error message. This situation arises when the
1355 programmer does something like:
1358 The second .req creates the "Foo" alias but then fails to create
1359 the artificial FOO alias because it has already been created by the
1361 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
/* Finally the all-lowercase variant, if it differs.  */
1368 for (p = nbuf; *p; p++)
1371 if (strncmp (nbuf, newname, nlen))
1372 insert_reg_alias (nbuf, old->number, old->type);
1379 /* Should never be called, as .req goes between the alias and the
1380 register name, not at the beginning of the line. */
1382 s_req (int a ATTRIBUTE_UNUSED)
1384 as_bad (_("invalid syntax for .req directive"));
1387 /* The .unreq directive deletes an alias which was previously defined
1388 by .req. For example:
/* NOTE(review): locals, braces and the enclosing if/else structure are
   partly missing from this extraction; code kept byte-identical.  */
1394 s_unreq (int a ATTRIBUTE_UNUSED)
/* Scan the alias name up to whitespace/newline, then NUL-terminate it
   in place (the original character is restored at the end).  */
1399 name = input_line_pointer;
1401 while (*input_line_pointer != 0
1402 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1403 ++input_line_pointer;
1405 saved_char = *input_line_pointer;
1406 *input_line_pointer = 0;
1409 as_bad (_("invalid syntax for .unreq directive"));
1412 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1415 as_bad (_("unknown register alias '%s'"), name);
/* Built-in register names cannot be removed.  */
1416 else if (reg->builtin)
1417 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
/* Remove the alias exactly as written.  */
1424 hash_delete (aarch64_reg_hsh, name, FALSE);
1425 free ((char *) reg->name);
1428 /* Also locate the all upper case and all lower case versions.
1429 Do not complain if we cannot find one or the other as it
1430 was probably deleted above. */
1432 nbuf = strdup (name);
/* Upper-case variant...  */
1433 for (p = nbuf; *p; p++)
1435 reg = hash_find (aarch64_reg_hsh, nbuf);
1438 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1439 free ((char *) reg->name);
/* ...and lower-case variant.  */
1443 for (p = nbuf; *p; p++)
1445 reg = hash_find (aarch64_reg_hsh, nbuf);
1448 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1449 free ((char *) reg->name);
/* Restore the input line and require nothing else on it.  */
1457 *input_line_pointer = saved_char;
1458 demand_empty_rest_of_line ();
1461 /* Directives: Instruction set selection. */
1464 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1465 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1466 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1467 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1469 /* Create a new mapping symbol for the transition to STATE. */
/* NOTE(review): the switch choosing SYMNAME ("$x"/"$d") is partly
   missing from this extraction; code kept byte-identical.  */
1472 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1475 const char *symname;
1482 type = BSF_NO_FLAGS;
1486 type = BSF_NO_FLAGS;
/* Create the local, untyped mapping symbol at VALUE in FRAG.  */
1492 symbolP = symbol_new (symname, now_seg, value, frag);
1493 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1495 /* Save the mapping symbols for future reference. Also check that
1496 we do not place two mapping symbols at the same offset within a
1497 frag. We'll handle overlap between frags in
1498 check_mapping_symbols.
1500 If .fill or other data filling directive generates zero sized data,
1501 the mapping symbol for the following code will have the same value
1502 as the one generated for the data filling directive. In this case,
1503 we replace the old symbol with the new one at the same address. */
1506 if (frag->tc_frag_data.first_map != NULL)
1508 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1509 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1512 frag->tc_frag_data.first_map = symbolP;
1514 if (frag->tc_frag_data.last_map != NULL)
/* Mapping symbols within a frag must be emitted in address order.  */
1516 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1517 S_GET_VALUE (symbolP));
1518 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1519 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1522 frag->tc_frag_data.last_map = symbolP;
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.  */

/* NOTE(review): incomplete extract — declarator and braces are not
   visible here; code tokens preserved unchanged, comments only.  */
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
      symbolS *symp = frag->tc_frag_data.last_map;

      /* If it was also the frag's first mapping symbol, clear that
	 reference too before unlinking it from the symbol chain.  */
      know (frag->tc_frag_data.first_map == symp);
      frag->tc_frag_data.first_map = NULL;
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);

  /* Mark the padding as data, then resume STATE just past the pad.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
1553 static void mapping_state_2 (enum mstate state, int max_chars);
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

/* NOTE(review): incomplete extract — braces, the early return, and
   parts of the transition handling are not visible here; code tokens
   preserved unchanged, comments only added.  */
mapping_state (enum mstate state)
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* Everything up to here in the first frag counts as data.  */
      make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);

  mapping_state_2 (state, 0);
/* Same as mapping_state, but MAX_CHARS bytes have already been
   allocated.  Put the mapping symbol that far back.  */

/* NOTE(review): incomplete extract — braces and the early-return
   bodies are not visible; code tokens preserved, comments only.  */
mapping_state_2 (enum mstate state, int max_chars)
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only normal (loadable) sections get mapping symbols.  */
  if (!SEG_NORMAL (now_seg))

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  /* Back up over the MAX_CHARS already reserved for the bytes that
     prompted this state change.  */
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1617 #define mapping_state(x) /* nothing */
1618 #define mapping_state_2(x, y) /* nothing */
1621 /* Directives: sectioning and alignment. */
1624 s_bss (int ignore ATTRIBUTE_UNUSED)
1626 /* We don't support putting frags in the BSS segment, we fake it by
1627 marking in_bss, then looking at s_skip for clues. */
1628 subseg_set (bss_section, 0);
1629 demand_empty_rest_of_line ();
1630 mapping_state (MAP_DATA);
/* Handle the .even directive: align the current location to a 2-byte
   boundary.  */

/* NOTE(review): incomplete extract — the declarator, braces and the
   guard implied by the comment below (presumably "if (!need_pass_2)"
   — TODO confirm against the full source) are not visible; tokens
   preserved, comments only added.  */
s_even (int ignore ATTRIBUTE_UNUSED)
  /* Never make frag if expect extra pass.  */
  frag_align (1, 0, 0);

  /* Record the 2^1 alignment requirement on the current section.  */
  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
1645 /* Directives: Literal pools. */
1647 static literal_pool *
1648 find_literal_pool (int size)
1652 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1654 if (pool->section == now_seg
1655 && pool->sub_section == now_subseg && pool->size == size)
/* Return the literal pool of entry size SIZE for the current section
   and subsection, creating and registering a new one if none exists.
   NOTE(review): incomplete extract — braces, the pool-not-found check
   and the trailing return are not visible; tokens preserved.  */
static literal_pool *
find_or_make_literal_pool (int size)
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;

  pool = find_literal_pool (size);

  /* Create a new pool.  */
  pool = XNEW (literal_pool);

  /* Currently we always put the literal pool in the current text
     section.  If we were generating "small" model code where we
     knew that all code and initialised data was within 1MB then
     we could output literals to mergeable, read-only data
     sections instead.  */
  pool->next_free_entry = 0;
  pool->section = now_seg;
  pool->sub_section = now_subseg;
  pool->next = list_of_pools;
  pool->symbol = NULL;

  /* Add it to the list.  */
  list_of_pools = pool;

  /* New pools, and emptied pools, will have a NULL symbol.  Give such
     a pool a fresh anchor symbol and a unique id.  */
  if (pool->symbol == NULL)
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    (valueT) 0, &zero_address_frag);
      pool->id = latest_pool_num++;
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.  */

/* NOTE(review): incomplete extract — the declarator, braces, loop
   break/return paths and the final return are not visible; code
   tokens preserved unchanged, comments only added.  */
add_to_lit_pool (expressionS *exp, int size)
  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
      expressionS * litexp = & pool->literals[entry].exp;

      /* A constant entry matches when value and signedness agree.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))

      /* A symbolic entry matches when every component agrees.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
      if (entry >= MAX_LITERAL_POOL_SIZE)
	  set_syntax_error (_("literal pool overflow"));

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	  pool->literals[entry].bignum = NULL;

  /* Rewrite *EXP to reference the pool entry: anchor symbol plus the
     entry's byte offset within the pool.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;
1767 /* Can't use symbol_new here, so have to create a symbol and then at
1768 a later date assign it a value. That's what these functions do. */
1771 symbol_locate (symbolS * symbolP,
1772 const char *name,/* It is copied, the caller can modify. */
1773 segT segment, /* Segment identifier (SEG_<something>). */
1774 valueT valu, /* Symbol value. */
1775 fragS * frag) /* Associated fragment. */
1778 char *preserved_copy_of_name;
1780 name_length = strlen (name) + 1; /* +1 for \0. */
1781 obstack_grow (¬es, name, name_length);
1782 preserved_copy_of_name = obstack_finish (¬es);
1784 #ifdef tc_canonicalize_symbol_name
1785 preserved_copy_of_name =
1786 tc_canonicalize_symbol_name (preserved_copy_of_name);
1789 S_SET_NAME (symbolP, preserved_copy_of_name);
1791 S_SET_SEGMENT (symbolP, segment);
1792 S_SET_VALUE (symbolP, valu);
1793 symbol_clear_list_pointers (symbolP);
1795 symbol_set_frag (symbolP, frag);
1797 /* Link to end of symbol chain. */
1799 extern int symbol_table_frozen;
1801 if (symbol_table_frozen)
1805 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1807 obj_symbol_new_hook (symbolP);
1809 #ifdef tc_symbol_new_hook
1810 tc_symbol_new_hook (symbolP);
1814 verify_symbol_chain (symbol_rootP, symbol_lastP);
1815 #endif /* DEBUG_SYMS */
/* Handle .ltorg / .pool: dump every non-empty literal pool for the
   current section at the current location.

   NOTE(review): incomplete extract — the declarator, locals (align,
   pool, sym_name, entry), braces and loop "continue" paths are not
   visible; code tokens preserved unchanged, comments only added.  */
s_ltorg (int ignored ATTRIBUTE_UNUSED)
  /* Pools exist for entry sizes 1<<2 (4 bytes) through 1<<4.  */
  for (align = 2; align <= 4; align++)
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* Label the pool with a fake name; "\002" cannot appear in user
	 symbol names, so no clash is possible.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  /* The preserved bignum copy is no longer needed.  */
	  if (exp->X_op == O_big)
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
1879 /* Forward declarations for functions below, in the MD interface
1881 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1882 static struct reloc_table_entry * find_reloc_table_entry (char **);
/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Handle .word/.long (NBYTES == 4) and .xword/.dword (NBYTES == 8),
   accepting an optional #:reloc: suffix on symbolic operands.

   NOTE(review): incomplete extract — the declarator, locals, braces,
   matching #endif lines and the "do" of the do/while loop are not
   visible; code tokens preserved unchanged, comments only added.  */
s_aarch64_elf_cons (int nbytes)

#ifdef md_flush_pending_output
  md_flush_pending_output ();

  /* A bare directive with no expressions is legal; just finish.  */
  if (is_it_end_of_statement ())
      demand_empty_rest_of_line ();

#ifdef md_cons_align
  md_cons_align (nbytes);

  mapping_state (MAP_DATA);
      struct reloc_table_entry *reloc;

      /* Non-symbolic expressions are emitted directly.  */
      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
	  /* Symbolic: look for an optional "#:name:" reloc suffix.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	      reloc = find_reloc_table_entry (&input_line_pointer);
		as_bad (_("unrecognized relocation suffix"));
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	    emit_expr (&exp, (unsigned int) nbytes);
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
/* Output a 32-bit word, but mark as an instruction.  */

/* NOTE(review): incomplete extract — the declarator, locals, braces,
   the expression parse and the "do" of the do/while loop are not
   visible; code tokens preserved unchanged, comments only added.  */
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)

#ifdef md_flush_pending_output
  md_flush_pending_output ();

  /* A bare .inst with no operands is legal; just finish.  */
  if (is_it_end_of_statement ())
      demand_empty_rest_of_line ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  mapping_state (MAP_INSN);

      /* Each operand must evaluate to a known 32-bit constant.  */
      if (exp.X_op != O_constant)
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();

      /* Instructions are little-endian words; byte-swap the value on
	 big-endian targets before emitting.  */
      if (target_big_endian)
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
      emit_expr (&exp, 4);
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
/* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction.  */

/* NOTE(review): incomplete extract — declarator, braces and the
   expression setup are not visible; tokens preserved.  */
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
  /* Plant a zero-size fixup at the current point; it only works if
     the ADD follows immediately after this directive.  */
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);
  demand_empty_rest_of_line ();
/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.  */

/* NOTE(review): incomplete extract — declarator, braces and the
   expression setup are not visible; tokens preserved.  */
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */

  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
/* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction.  */

/* NOTE(review): incomplete extract — declarator, braces and the
   expression setup are not visible; tokens preserved.  */
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
  /* Plant a zero-size fixup at the current point; it only works if
     the LDR follows immediately after this directive.  */
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
2045 #endif /* OBJ_ELF */
2047 static void s_aarch64_arch (int);
2048 static void s_aarch64_cpu (int);
2049 static void s_aarch64_arch_extension (int);
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

/* NOTE(review): incomplete extract — some entries (the comment above
   suggests at least a ".req" line) and the table terminator are not
   visible; tokens preserved, comments only added.  */
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"unreq", s_unreq, 0},
  {"even", s_even, 0},
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  /* TLS descriptor relocation directives.  */
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* Data directives; the integer argument is the byte width.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
/* Check whether STR points to a register name followed by a comma or the
   end of line; REG_TYPE indicates which register types are checked
   against.  Return TRUE if STR is such a register name; otherwise return
   FALSE.  The function does not intend to produce any diagnostics, but since
   the register parser aarch64_reg_parse, which is called by this function,
   does produce diagnostics, we call clear_error to clear any diagnostics
   that may be generated by aarch64_reg_parse.
   Also, the function returns FALSE directly if there is any user error
   present at the function entry.  This prevents the existing diagnostics
   state from being spoiled.
   The function currently serves parse_constant_immediate and
   parse_big_immediate only.  */

/* NOTE(review): incomplete extract — declarator, braces and the
   return statements are not visible; tokens preserved.  */
reg_name_p (char *str, aarch64_reg_type reg_type)
  /* Prevent the diagnostics state from being spoiled.  */

  reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);

  /* Clear the parsing error that may be set by the reg parser.  */

  if (reg == PARSE_FAIL)

  /* A register name only counts if it is followed by ',' or EOL.  */
  skip_whitespace (str);
  if (*str == ',' || is_end_of_line[(unsigned int) *str])
/* Parser functions used exclusively in instruction operands.  */

/* Parse an immediate expression which may not be constant.

   To prevent the expression parser from pushing a register name
   into the symbol table as an undefined symbol, firstly a check is
   done to find out whether STR is a register of type REG_TYPE followed
   by a comma or the end of line.  Return FALSE if STR is such a string.  */

/* NOTE(review): incomplete extract — declarator, braces and return
   statements are not visible; tokens preserved.  */
parse_immediate_expression (char **str, expressionS *exp,
			    aarch64_reg_type reg_type)
  /* Reject a bare register name where an immediate is expected.  */
  if (reg_name_p (*str, reg_type))
      set_recoverable_error (_("immediate operand required"));

  my_get_expression (exp, str, GE_OPT_PREFIX, 1);

  if (exp->X_op == O_absent)
      set_fatal_syntax_error (_("missing immediate expression"));
/* Constant immediate-value read function for use in insn parsing.
   STR points to the beginning of the immediate (with the optional
   leading #); *VAL receives the value.  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   Return TRUE on success; otherwise return FALSE.  */

/* NOTE(review): incomplete extract — declarator, braces, the local
   expressionS and the return statements are not visible; tokens
   preserved.  */
parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
  if (! parse_immediate_expression (str, &exp, reg_type))

  /* Only fully-resolved constants are accepted here.  */
  if (exp.X_op != O_constant)
      set_syntax_error (_("constant expression required"));

  *val = exp.X_add_number;
/* Pack the interesting bits of the IEEE754 single-precision encoding
   IMM into the AArch64 8-bit FP immediate form: the sign bit b[31]
   moves to b[7] and bits b[25:19] (low exponent + high mantissa bits)
   move to b[6:0].  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;		/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> (31 - 7)) & 0x80;	/* b[31] -> b[7].  */

  return sign | low7;
}
2180 /* Return TRUE if the single-precision floating-point value encoded in IMM
2181 can be expressed in the AArch64 8-bit signed floating-point format with
2182 3-bit exponent and normalized 4 bits of precision; in other words, the
2183 floating-point value must be expressable as
2184 (+/-) n / 16 * power (2, r)
2185 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2188 aarch64_imm_float_p (uint32_t imm)
2190 /* If a single-precision floating-point value has the following bit
2191 pattern, it can be expressed in the AArch64 8-bit floating-point
2194 3 32222222 2221111111111
2195 1 09876543 21098765432109876543210
2196 n Eeeeeexx xxxx0000000000000000000
2198 where n, e and each x are either 0 or 1 independently, with
2203 /* Prepare the pattern for 'Eeeeee'. */
2204 if (((imm >> 30) & 0x1) == 0)
2205 pattern = 0x3e000000;
2207 pattern = 0x40000000;
2209 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2210 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2213 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2214 as an IEEE float without any loss of precision. Store the value in
2218 can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
2220 /* If a double-precision floating-point value has the following bit
2221 pattern, it can be expressed in a float:
2223 6 66655555555 5544 44444444 33333333 33222222 22221111 111111
2224 3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
2225 n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000
2227 -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
2228 if Eeee_eeee != 1111_1111
2230 where n, e, s and S are either 0 or 1 independently and where ~ is the
2234 uint32_t high32 = imm >> 32;
2235 uint32_t low32 = imm;
2237 /* Lower 29 bits need to be 0s. */
2238 if ((imm & 0x1fffffff) != 0)
2241 /* Prepare the pattern for 'Eeeeeeeee'. */
2242 if (((high32 >> 30) & 0x1) == 0)
2243 pattern = 0x38000000;
2245 pattern = 0x40000000;
2248 if ((high32 & 0x78000000) != pattern)
2251 /* Check Eeee_eeee != 1111_1111. */
2252 if ((high32 & 0x7ff00000) == 0x47f00000)
2255 *fpword = ((high32 & 0xc0000000) /* 1 n bit and 1 E bit. */
2256 | ((high32 << 3) & 0x3ffffff8) /* 7 e and 20 s bits. */
2257 | (low32 >> 29)); /* 3 S bits. */
2261 /* Return true if we should treat OPERAND as a double-precision
2262 floating-point operand rather than a single-precision one. */
2264 double_precision_operand_p (const aarch64_opnd_info *operand)
2266 /* Check for unsuffixed SVE registers, which are allowed
2267 for LDR and STR but not in instructions that require an
2268 immediate. We get better error messages if we arbitrarily
2269 pick one size, parse the immediate normally, and then
2270 report the match failure in the normal way. */
2271 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2272 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

/* NOTE(review): incomplete extract — the declarator, several locals
   (str, fpnum, val, i), braces, goto labels and the success path are
   not visible; code tokens preserved unchanged, comments only.  */
parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
			 aarch64_reg_type reg_type)
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  unsigned fpword = 0;
  bfd_boolean hex_p = FALSE;

  skip_past_char (&str, '#');

  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))

	  /* Double precision: must convert exactly to a float.  */
	  if (!can_convert_double_to_float (val, &fpword))
      /* Single precision: the constant must fit in 32 bits.  */
      else if ((uint64_t) val > 0xffffffff)
  else if (reg_name_p (str, reg_type))
      set_recoverable_error (_("immediate operand required"));
      /* Decimal form: parse an IEEE single via atof_ieee.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;

  set_fatal_syntax_error (_("invalid floating-point constant"));
/* Less-generic immediate-value read function with the possibility of loading
   a big (64-bit) immediate, as required by AdvSIMD Modified immediate
   instructions.

   To prevent the expression parser from pushing a register name into the
   symbol table as an undefined symbol, a check is firstly done to find
   out whether STR is a register of type REG_TYPE followed by a comma or
   the end of line.  Return FALSE if STR is such a register.  */

/* NOTE(review): incomplete extract — declarator, braces, the local
   ptr, the O_big handling and the returns are not visible; tokens
   preserved.  */
parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
  if (reg_name_p (ptr, reg_type))
      set_syntax_error (_("immediate operand required"));

  /* Parse into the global reloc expression slot so callers can also
     see symbolic results.  */
  my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);

  if (inst.reloc.exp.X_op == O_constant)
    *imm = inst.reloc.exp.X_add_number;
2381 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2382 if NEED_LIBOPCODES is non-zero, the fixup will need
2383 assistance from the libopcodes. */
2386 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2387 const aarch64_opnd_info *operand,
2388 int need_libopcodes_p)
2390 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2391 reloc->opnd = operand->type;
2392 if (need_libopcodes_p)
2393 reloc->need_libopcodes_p = 1;
2396 /* Return TRUE if the instruction needs to be fixed up later internally by
2397 the GAS; otherwise return FALSE. */
2399 static inline bfd_boolean
2400 aarch64_gas_internal_fixup_p (void)
2402 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
/* Assign the immediate value to the relevant field in *OPERAND if
   RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
   needs an internal fixup in a later stage.
   ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
   IMM.VALUE that may get assigned with the constant.  */

/* NOTE(review): incomplete extract — the addr_off_p parameter line,
   braces and the if/else structure are partly missing; code tokens
   preserved unchanged, comments only added.  */
assign_imm_if_const_or_fixup_later (struct reloc *reloc,
				    aarch64_opnd_info *operand,
				    int need_libopcodes_p,
  if (reloc->exp.X_op == O_constant)
	/* Constant known now: store it in the selected field...  */
	operand->addr.offset.imm = reloc->exp.X_add_number;
	operand->imm.value = reloc->exp.X_add_number;
      /* ...and no relocation is required any more.  */
      reloc->type = BFD_RELOC_UNUSED;
      aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
      /* Tell libopcodes to ignore this operand or not.  This is helpful
	 when one of the operands needs to be fixed up later but we need
	 libopcodes to check the other operands.  */
      operand->skip = skip_p;
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

/* NOTE(review): incomplete extract — the opening brace and the
   leading name/flags fields are not visible; tokens preserved.  */
struct reloc_table_entry
  bfd_reloc_code_real_type adr_type;		/* Reloc used with ADR.  */
  bfd_reloc_code_real_type adrp_type;		/* Reloc used with ADRP.  */
  bfd_reloc_code_real_type movw_type;		/* Reloc used with MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;		/* Reloc used with ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;		/* Reloc used with load/store offset.  */
  bfd_reloc_code_real_type ld_literal_type;	/* Reloc used with LDR (literal).  */
/* NOTE(review): incomplete extract — most entries below are missing
   their name strings, flags, braces and padding-zero fields; only the
   surviving reloc-code tokens and comments are shown, preserved
   unchanged.  Each entry maps one ":modifier:" suffix to the reloc
   codes used per instruction class (adr/adrp/movw/add/ldst/ld-lit).  */
static struct reloc_table_entry reloc_table[] = {
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,

  /* Higher 21 bits of pc-relative page offset: ADRP */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
   BFD_RELOC_AARCH64_MOVW_G0,

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
   BFD_RELOC_AARCH64_MOVW_G0_S,

  /* Less significant bits 0-15 of address/value: MOVK, no check */
   BFD_RELOC_AARCH64_MOVW_G0_NC,

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
   BFD_RELOC_AARCH64_MOVW_G1,

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
   BFD_RELOC_AARCH64_MOVW_G1_S,

  /* Less significant bits 16-31 of address/value: MOVK, no check */
   BFD_RELOC_AARCH64_MOVW_G1_NC,

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
   BFD_RELOC_AARCH64_MOVW_G2,

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
   BFD_RELOC_AARCH64_MOVW_G2_S,

  /* Less significant bits 32-47 of address/value: MOVK, no check */
   BFD_RELOC_AARCH64_MOVW_G2_NC,

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
   BFD_RELOC_AARCH64_MOVW_G3,

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
   BFD_RELOC_AARCH64_MOVW_PREL_G0,

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
   BFD_RELOC_AARCH64_MOVW_PREL_G1,

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
   BFD_RELOC_AARCH64_MOVW_PREL_G2,

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
   BFD_RELOC_AARCH64_MOVW_PREL_G3,

  /* Get to the page containing GOT entry for a symbol.  */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,

  /* 0-15 bits of address/value: MOVk, no check.  */
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,

  /* Most significant bits 16-31 of address/value: MOVZ.  */
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,

  /* Get to the page containing GOT TLS entry for a symbol */
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,

  /* Get to the page containing GOT TLS entry for a symbol */
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,

  /* Lower 16 bits address/value: MOVk.  */
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,

  /* Most significant bits 16-31 of address/value: MOVZ.  */
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,

  /* Get to the page containing GOT TLS entry for a symbol */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,

  /* 12 bit offset into the module TLS base address.  */
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,

  /* bits[23:12] of offset to the module TLS base address.  */
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,

  /* bits[15:0] of offset to the module TLS base address.  */
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,

  /* bits[31:16] of offset to the module TLS base address.  */
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,

  /* bits[47:32] of offset to the module TLS base address.  */
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,

  /* Get to the page containing GOT TLS entry for a symbol */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,

  /* Get tp offset for a symbol.  */
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,

  /* Get tp offset for a symbol.  */
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,

  /* Get tp offset for a symbol.  */
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,

  /* Most significant bits 32-47 of address/value: MOVZ.  */
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,

  /* Most significant bits 16-31 of address/value: MOVZ.  */
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,

  /* Most significant bits 0-15 of address/value: MOVZ.  */
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,

  /* 15bit offset from got entry to base address of GOT table.  */
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,

  /* 14bit offset from got entry to base address of GOT table.  */
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2983 /* Given the address of a pointer pointing to the textual name of a
2984 relocation as may appear in assembler source, attempt to find its
2985 details in reloc_table. The pointer will be updated to the character
2986 after the trailing colon. On failure, NULL will be returned;
2987 otherwise return the reloc_table_entry. */
2989 static struct reloc_table_entry *
2990 find_reloc_table_entry (char **str)
2993 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2995 int length = strlen (reloc_table[i].name);
2997 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2998 && (*str)[length] == ':')
3000 *str += (length + 1);
3001 return &reloc_table[i];
/* Mode argument to parse_shift and parser_shifter_operand.  */

/* NOTE(review): incomplete extract — the opening brace, the closing
   "};" and the continuation of two truncated comments are missing;
   enumerator tokens preserved unchanged.  */
enum parse_shift_mode
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or similar  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or similar  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
3023 /* Parse a <shift> operator on an AArch64 data processing instruction.
3024 Return TRUE on success; otherwise return FALSE. */
3026 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3028 const struct aarch64_name_value_pair *shift_op;
3029 enum aarch64_modifier_kind kind;
/* The operator name is the leading run of alphabetic characters; look it
   up in the shift-operator hash table.  */
3035 for (p = *str; ISALPHA (*p); p++)
3040 set_syntax_error (_("shift expression expected"));
3044 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
3046 if (shift_op == NULL)
3048 set_syntax_error (_("shift operator expected"));
3052 kind = aarch64_get_operand_modifier (shift_op);
/* MSL and MUL are only legal in the specific modes that request them.  */
3054 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3056 set_syntax_error (_("invalid use of 'MSL'"));
3060 if (kind == AARCH64_MOD_MUL
3061 && mode != SHIFTED_MUL
3062 && mode != SHIFTED_MUL_VL)
3064 set_syntax_error (_("invalid use of 'MUL'"));
/* Reject operator kinds that the requested MODE does not allow.  */
3070 case SHIFTED_LOGIC_IMM:
3071 if (aarch64_extend_operator_p (kind))
3073 set_syntax_error (_("extending shift is not permitted"));
3078 case SHIFTED_ARITH_IMM:
3079 if (kind == AARCH64_MOD_ROR)
3081 set_syntax_error (_("'ROR' shift is not permitted"));
3087 if (kind != AARCH64_MOD_LSL)
3089 set_syntax_error (_("only 'LSL' shift is permitted"));
3095 if (kind != AARCH64_MOD_MUL)
3097 set_syntax_error (_("only 'MUL' is permitted"));
3102 case SHIFTED_MUL_VL:
3103 /* "MUL VL" consists of two separate tokens. Require the first
3104 token to be "MUL" and look for a following "VL". */
3105 if (kind == AARCH64_MOD_MUL)
3107 skip_whitespace (p);
3108 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3111 kind = AARCH64_MOD_MUL_VL;
3115 set_syntax_error (_("only 'MUL VL' is permitted"));
3118 case SHIFTED_REG_OFFSET:
3119 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3120 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3122 set_fatal_syntax_error
3123 (_("invalid shift for the register offset addressing mode"));
3128 case SHIFTED_LSL_MSL:
3129 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3131 set_syntax_error (_("invalid shift operator"));
3140 /* Whitespace can appear here if the next thing is a bare digit. */
3141 skip_whitespace (p);
3143 /* Parse shift amount. */
/* A register-offset shifter before ']' and "MUL VL" take no explicit
   amount; otherwise an (optionally '#'-prefixed) expression may follow.  */
3145 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3146 exp.X_op = O_absent;
3149 if (is_immediate_prefix (*p))
3154 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
3156 if (kind == AARCH64_MOD_MUL_VL)
3157 /* For consistency, give MUL VL the same shift amount as an implicit
3159 operand->shifter.amount = 1;
3160 else if (exp.X_op == O_absent)
/* Only extend operators may omit the amount (defaults to 0).  */
3162 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3164 set_syntax_error (_("missing shift amount"));
3167 operand->shifter.amount = 0;
3169 else if (exp.X_op != O_constant)
3171 set_syntax_error (_("constant shift amount required"));
3174 /* For parsing purposes, MUL #n has no inherent range. The range
3175 depends on the operand and will be checked by operand-specific
3177 else if (kind != AARCH64_MOD_MUL
3178 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3180 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3185 operand->shifter.amount = exp.X_add_number;
3186 operand->shifter.amount_present = 1;
/* Record the parsed operator in the operand's shifter description.  */
3189 operand->shifter.operator_present = 1;
3190 operand->shifter.kind = kind;
3196 /* Parse a <shifter_operand> for a data processing instruction:
3199 #<immediate>, LSL #imm
3201 Validation of immediate operands is deferred to md_apply_fix.
3203 Return TRUE on success; otherwise return FALSE. */
3206 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3207 enum parse_shift_mode mode)
/* Parse the "#<immediate>{, LSL #imm}" form; only arithmetic and logical
   immediate modes reach this routine.  */
3211 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3216 /* Accept an immediate expression. */
3217 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3220 /* Accept optional LSL for arithmetic immediate values. */
3221 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3222 if (! parse_shift (&p, operand, SHIFTED_LSL))
3225 /* Do not accept any shifter for logical immediate values.  */
3226 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3227 && parse_shift (&p, operand, mode))
3229 set_syntax_error (_("unexpected shift operator"));
3237 /* Parse a <shifter_operand> for a data processing instruction:
3242 #<immediate>, LSL #imm
3244 where <shift> is handled by parse_shift above, and the last two
3245 cases are handled by the function above.
3247 Validation of immediate operands is deferred to md_apply_fix.
3249 Return TRUE on success; otherwise return FALSE. */
3252 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3253 enum parse_shift_mode mode)
3255 const reg_entry *reg;
3256 aarch64_opnd_qualifier_t qualifier;
3257 enum aarch64_operand_class opd_class
3258 = aarch64_get_operand_class (operand->type);
/* First try to parse a register; if one is present the operand is the
   shifted-register form, otherwise fall through to the immediate form.  */
3260 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3263 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3265 set_syntax_error (_("unexpected register in the immediate operand"));
3269 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3271 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3275 operand->reg.regno = reg->number;
3276 operand->qualifier = qualifier;
3278 /* Accept optional shift operation on register. */
3279 if (! skip_past_comma (str))
3282 if (! parse_shift (str, operand, mode))
/* No register parsed: a modified-register operand demands one.  */
3287 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3290 (_("integer register expected in the extended/shifted operand "
3295 /* We have a shifted immediate variable. */
3296 return parse_shifter_operand_imm (str, operand, mode);
3299 /* Return TRUE on success; return FALSE otherwise. */
3302 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3303 enum parse_shift_mode mode)
3307 /* Determine if we have the sequence of characters #: or just :
3308 coming next. If we do, then we check for a :rello: relocation
3309 modifier. If we don't, punt the whole lot to
3310 parse_shifter_operand. */
3312 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3314 struct reloc_table_entry *entry;
3322 /* Try to parse a relocation. Anything else is an error. */
3323 if (!(entry = find_reloc_table_entry (str)))
3325 set_syntax_error (_("unknown relocation modifier"));
/* The reloc must have an ADD-instruction variant to be usable here.  */
3329 if (entry->add_type == 0)
3332 (_("this relocation modifier is not allowed on this instruction"));
3336 /* Save str before we decompose it. */
3339 /* Next, we parse the expression. */
3340 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3343 /* Record the relocation type (use the ADD variant here). */
3344 inst.reloc.type = entry->add_type;
3345 inst.reloc.pc_rel = entry->pc_rel;
3347 /* If str is empty, we've reached the end, stop here. */
3351 /* Otherwise, we have a shifted reloc modifier, so rewind to
3352 recover the variable name and continue parsing for the shifter. */
3354 return parse_shifter_operand_imm (str, operand, mode);
/* No relocation modifier: ordinary shifter operand.  */
3357 return parse_shifter_operand (str, operand, mode);
3360 /* Parse all forms of an address expression. Information is written
3361 to *OPERAND and/or inst.reloc.
3363 The A64 instruction set has the following addressing modes:
3366 [base] // in SIMD ld/st structure
3367 [base{,#0}] // in ld/st exclusive
3369 [base,Xm{,LSL #imm}]
3370 [base,Xm,SXTX {#imm}]
3371 [base,Wm,(S|U)XTW {#imm}]
3376 [base],Xm // in SIMD ld/st structure
3377 PC-relative (literal)
3381 [base,Zm.D{,LSL #imm}]
3382 [base,Zm.S,(S|U)XTW {#imm}]
3383 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3386 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3387 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3388 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3390 (As a convenience, the notation "=immediate" is permitted in conjunction
3391 with the pc-relative literal load instructions to automatically place an
3392 immediate value or symbolic address in a nearby literal pool and generate
3393 a hidden label which references it.)
3395 Upon a successful parsing, the address structure in *OPERAND will be
3396 filled in the following way:
3398 .base_regno = <base>
3399 .offset.is_reg // 1 if the offset is a register
3401 .offset.regno = <Rm>
3403 For different addressing modes defined in the A64 ISA:
3406 .pcrel=0; .preind=1; .postind=0; .writeback=0
3408 .pcrel=0; .preind=1; .postind=0; .writeback=1
3410 .pcrel=0; .preind=0; .postind=1; .writeback=1
3411 PC-relative (literal)
3412 .pcrel=1; .preind=1; .postind=0; .writeback=0
3414 The shift/extension information, if any, will be stored in .shifter.
3415 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3416 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3417 corresponding register.
3419 BASE_TYPE says which types of base register should be accepted and
3420 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3421 is the type of shifter that is allowed for immediate offsets,
3422 or SHIFTED_NONE if none.
3424 In all other respects, it is the caller's responsibility to check
3425 for addressing modes not supported by the instruction, and to set
3429 parse_address_main (char **str, aarch64_opnd_info *operand,
3430 aarch64_opnd_qualifier_t *base_qualifier,
3431 aarch64_opnd_qualifier_t *offset_qualifier,
3432 aarch64_reg_type base_type, aarch64_reg_type offset_type,
3433 enum parse_shift_mode imm_shift_mode)
3436 const reg_entry *reg;
3437 expressionS *exp = &inst.reloc.exp;
3439 *base_qualifier = AARCH64_OPND_QLF_NIL;
3440 *offset_qualifier = AARCH64_OPND_QLF_NIL;
/* No opening '[' means a PC-relative literal, an "=immediate" literal-pool
   request, or a reloc-modified symbol.  */
3441 if (! skip_past_char (&p, '['))
3443 /* =immediate or label. */
3444 operand->addr.pcrel = 1;
3445 operand->addr.preind = 1;
3447 /* #:<reloc_op>:<symbol> */
3448 skip_past_char (&p, '#');
3449 if (skip_past_char (&p, ':'))
3451 bfd_reloc_code_real_type ty;
3452 struct reloc_table_entry *entry;
3454 /* Try to parse a relocation modifier. Anything else is
3456 entry = find_reloc_table_entry (&p);
3459 set_syntax_error (_("unknown relocation modifier"));
/* Pick the reloc variant matching the operand: ADR form for PCREL21,
   load-literal form otherwise.  */
3463 switch (operand->type)
3465 case AARCH64_OPND_ADDR_PCREL21:
3467 ty = entry->adr_type;
3471 ty = entry->ld_literal_type;
3478 (_("this relocation modifier is not allowed on this "
3484 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3486 set_syntax_error (_("invalid relocation expression"));
3490 /* #:<reloc_op>:<expr> */
3491 /* Record the relocation type. */
3492 inst.reloc.type = ty;
3493 inst.reloc.pc_rel = entry->pc_rel;
3498 if (skip_past_char (&p, '='))
3499 /* =immediate; need to generate the literal in the literal pool. */
3500 inst.gen_lit_pool = 1;
3502 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3504 set_syntax_error (_("invalid address"));
/* Inside '[': first parse the base register.  */
3515 reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
3516 if (!reg || !aarch64_check_reg_type (reg, base_type))
3518 set_syntax_error (_(get_reg_expected_msg (base_type)));
3521 operand->addr.base_regno = reg->number;
/* A comma introduces a pre-indexed offset: register or immediate.  */
3524 if (skip_past_comma (&p))
3527 operand->addr.preind = 1;
3529 reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
3532 if (!aarch64_check_reg_type (reg, offset_type))
3534 set_syntax_error (_(get_reg_expected_msg (offset_type)));
3539 operand->addr.offset.regno = reg->number;
3540 operand->addr.offset.is_reg = 1;
3541 /* Shifted index. */
3542 if (skip_past_comma (&p))
3545 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3546 /* Use the diagnostics set in parse_shift, so not set new
3547 error message here. */
3551 [base,Xm{,LSL #imm}]
3552 [base,Xm,SXTX {#imm}]
3553 [base,Wm,(S|U)XTW {#imm}] */
/* Cross-check the offset register width against the extend kind.  */
3554 if (operand->shifter.kind == AARCH64_MOD_NONE
3555 || operand->shifter.kind == AARCH64_MOD_LSL
3556 || operand->shifter.kind == AARCH64_MOD_SXTX)
3558 if (*offset_qualifier == AARCH64_OPND_QLF_W)
3560 set_syntax_error (_("invalid use of 32-bit register offset"));
3563 if (aarch64_get_qualifier_esize (*base_qualifier)
3564 != aarch64_get_qualifier_esize (*offset_qualifier))
3566 set_syntax_error (_("offset has different size from base"));
3570 else if (*offset_qualifier == AARCH64_OPND_QLF_X)
3572 set_syntax_error (_("invalid use of 64-bit register offset"));
3578 /* [Xn,#:<reloc_op>:<symbol> */
3579 skip_past_char (&p, '#');
3580 if (skip_past_char (&p, ':'))
3582 struct reloc_table_entry *entry;
3584 /* Try to parse a relocation modifier. Anything else is
3586 if (!(entry = find_reloc_table_entry (&p)))
3588 set_syntax_error (_("unknown relocation modifier"));
3592 if (entry->ldst_type == 0)
3595 (_("this relocation modifier is not allowed on this "
3600 /* [Xn,#:<reloc_op>: */
3601 /* We now have the group relocation table entry corresponding to
3602 the name in the assembler source. Next, we parse the
3604 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3606 set_syntax_error (_("invalid relocation expression"));
3610 /* [Xn,#:<reloc_op>:<expr> */
3611 /* Record the load/store relocation type. */
3612 inst.reloc.type = entry->ldst_type;
3613 inst.reloc.pc_rel = entry->pc_rel;
3617 if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3619 set_syntax_error (_("invalid expression in the address"));
3623 if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
3624 /* [Xn,<expr>,<shifter> */
3625 if (! parse_shift (&p, operand, imm_shift_mode))
/* The address proper must be closed by ']'.  */
3631 if (! skip_past_char (&p, ']'))
3633 set_syntax_error (_("']' expected"));
/* Handle writeback suffixes: "]!" for pre-indexed, "],<offset>" for
   post-indexed.  */
3637 if (skip_past_char (&p, '!'))
3639 if (operand->addr.preind && operand->addr.offset.is_reg)
3641 set_syntax_error (_("register offset not allowed in pre-indexed "
3642 "addressing mode"));
3646 operand->addr.writeback = 1;
3648 else if (skip_past_comma (&p))
3651 operand->addr.postind = 1;
3652 operand->addr.writeback = 1;
3654 if (operand->addr.preind)
3656 set_syntax_error (_("cannot combine pre- and post-indexing"));
3660 reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
3664 if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
3666 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3670 operand->addr.offset.regno = reg->number;
3671 operand->addr.offset.is_reg = 1;
3673 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3676 set_syntax_error (_("invalid expression in the address"));
3681 /* If at this point neither .preind nor .postind is set, we have a
3682 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3683 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3685 if (operand->addr.writeback)
3688 set_syntax_error (_("missing offset in the pre-indexed address"));
3692 operand->addr.preind = 1;
3693 inst.reloc.exp.X_op = O_constant;
3694 inst.reloc.exp.X_add_number = 0;
3701 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3704 parse_address (char **str, aarch64_opnd_info *operand)
/* Convenience wrapper: base AArch64 addressing with X-or-SP base,
   X/W/ZR offset and no immediate shifter; qualifiers are discarded.  */
3706 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3707 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3708 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3711 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3712 The arguments have the same meaning as for parse_address_main.
3713 Return TRUE on success. */
3715 parse_sve_address (char **str, aarch64_opnd_info *operand,
3716 aarch64_opnd_qualifier_t *base_qualifier,
3717 aarch64_opnd_qualifier_t *offset_qualifier)
/* SVE addressing: vector base/offset registers allowed, and the caller
   receives the parsed qualifiers for later checking.  */
3719 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3720 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3724 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3725 Return TRUE on success; otherwise return FALSE. */
3727 parse_half (char **str, int *internal_fixup_p)
3731 skip_past_char (&p, '#');
3733 gas_assert (internal_fixup_p);
/* *INTERNAL_FIXUP_P is set when no relocation modifier is present and the
   expression must be resolved by the assembler itself.  */
3734 *internal_fixup_p = 0;
3738 struct reloc_table_entry *entry;
3740 /* Try to parse a relocation. Anything else is an error. */
3742 if (!(entry = find_reloc_table_entry (&p)))
3744 set_syntax_error (_("unknown relocation modifier"));
/* The reloc must have a MOVW-instruction variant to be usable here.  */
3748 if (entry->movw_type == 0)
3751 (_("this relocation modifier is not allowed on this instruction"));
3755 inst.reloc.type = entry->movw_type;
3758 *internal_fixup_p = 1;
3760 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3767 /* Parse an operand for an ADRP instruction:
3769 Return TRUE on success; otherwise return FALSE. */
3772 parse_adrp (char **str)
3779 struct reloc_table_entry *entry;
3781 /* Try to parse a relocation. Anything else is an error. */
3783 if (!(entry = find_reloc_table_entry (&p)))
3785 set_syntax_error (_("unknown relocation modifier"));
/* The reloc must have an ADRP-instruction variant to be usable here.  */
3789 if (entry->adrp_type == 0)
3792 (_("this relocation modifier is not allowed on this instruction"));
3796 inst.reloc.type = entry->adrp_type;
/* No modifier: default to the plain page-relative ADRP relocation.  */
3799 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3801 inst.reloc.pc_rel = 1;
3803 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3810 /* Miscellaneous. */
3812 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3813 of SIZE tokens in which index I gives the token for field value I,
3814 or is null if field value I is invalid. REG_TYPE says which register
3815 names should be treated as registers rather than as symbolic immediates.
3817 Return true on success, moving *STR past the operand and storing the
3818 field value in *VAL. */
3821 parse_enum_string (char **str, int64_t *val, const char *const *array,
3822 size_t size, aarch64_reg_type reg_type)
3828 /* Match C-like tokens. */
3830 while (ISALNUM (*q))
/* First try a case-insensitive match against the symbolic names.  */
3833 for (i = 0; i < size; ++i)
3835 && strncasecmp (array[i], p, q - p) == 0
3836 && array[i][q - p] == 0)
/* Fall back to a plain immediate in the range [0, SIZE).  */
3843 if (!parse_immediate_expression (&p, &exp, reg_type))
3846 if (exp.X_op == O_constant
3847 && (uint64_t) exp.X_add_number < size)
3849 *val = exp.X_add_number;
3854 /* Use the default error for this operand. */
3858 /* Parse an option for a preload instruction. Returns the encoding for the
3859 option, or PARSE_FAIL. */
3862 parse_pldop (char **str)
3865 const struct aarch64_name_value_pair *o;
/* The option name is an alphanumeric token looked up in the PLD-op table.  */
3868 while (ISALNUM (*q))
3871 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3879 /* Parse an option for a barrier instruction. Returns the encoding for the
3880 option, or PARSE_FAIL. */
3883 parse_barrier (char **str)
3886 const asm_barrier_opt *o;
/* The option name is an alphabetic token looked up in the barrier table.  */
3889 while (ISALPHA (*q))
3892 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3900 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3901 return 0 if successful. Otherwise return PARSE_FAIL. */
3904 parse_barrier_psb (char **str,
3905 const struct aarch64_name_value_pair ** hint_opt)
3908 const struct aarch64_name_value_pair *o;
3911 while (ISALPHA (*q))
3914 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3917 set_fatal_syntax_error
3918 ( _("unknown or missing option to PSB"));
/* 0x11 is the hint-option encoding of CSYNC, the only operand PSB takes.  */
3922 if (o->value != 0x11)
3924 /* PSB only accepts option name 'CSYNC'. */
3926 (_("the specified option is not accepted for PSB"));
3935 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3936 Returns the encoding for the option, or PARSE_FAIL.
3938 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3939 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
3941 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
3942 field, otherwise as a system register.
3946 parse_sys_reg (char **str, struct hash_control *sys_regs,
3947 int imple_defined_p, int pstatefield_p,
3952 const aarch64_sys_reg *o;
/* Copy the register name to BUF, lower-cased, for the hash lookup.  */
3956 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3958 *p++ = TOLOWER (*q);
3960 /* Assert that BUF be large enough. */
3961 gas_assert (p - buf == q - *str);
3963 o = hash_find (sys_regs, buf);
3966 if (!imple_defined_p)
3970 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3971 unsigned int op0, op1, cn, cm, op2;
3973 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3976 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
/* Pack the fields into the standard MSR/MRS encoding layout.  */
3978 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
/* Known name: diagnose features the selected CPU lacks and deprecation.  */
3985 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
3986 as_bad (_("selected processor does not support PSTATE field "
3988 if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
3989 as_bad (_("selected processor does not support system register "
3991 if (aarch64_sys_reg_deprecated_p (o))
3992 as_warn (_("system register name '%s' is deprecated and may be "
3993 "removed in a future release"), buf);
4003 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4004 for the option, or NULL. */
4006 static const aarch64_sys_ins_reg *
4007 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
4011 const aarch64_sys_ins_reg *o;
/* Copy the operand name to BUF, lower-cased, for the hash lookup.  */
4014 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4016 *p++ = TOLOWER (*q);
4019 o = hash_find (sys_ins_regs, buf);
4023 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
4024 as_bad (_("selected processor does not support system register "
/* Operand-parsing helper macros used by the per-operand parsing loop in
   md_assemble.  On any parse failure they branch to the local `failure'
   label, so they may only be used where that label is in scope.  */
4031 #define po_char_or_fail(chr) do { \
4032 if (! skip_past_char (&str, chr)) \
4036 #define po_reg_or_fail(regtype) do { \
4037 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
4038 if (val == PARSE_FAIL) \
4040 set_default_error (); \
4045 #define po_int_reg_or_fail(reg_type) do { \
4046 reg = aarch64_reg_parse_32_64 (&str, &qualifier); \
4047 if (!reg || !aarch64_check_reg_type (reg, reg_type)) \
4049 set_default_error (); \
4052 info->reg.regno = reg->number; \
4053 info->qualifier = qualifier; \
4056 #define po_imm_nc_or_fail() do { \
4057 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
4061 #define po_imm_or_fail(min, max) do { \
4062 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
4064 if (val < min || val > max) \
4066 set_fatal_syntax_error (_("immediate value out of range "\
4067 #min " to "#max)); \
4072 #define po_enum_or_fail(array) do { \
4073 if (!parse_enum_string (&str, &val, array, \
4074 ARRAY_SIZE (array), imm_reg_type)) \
4078 #define po_misc_or_fail(expr) do { \
4083 /* encode the 12-bit imm field of Add/sub immediate */
4084 static inline uint32_t
/* Place IMM into the 12-bit immediate field of an add/sub-immediate
   instruction.  */
4085 encode_addsub_imm (uint32_t imm)
4090 /* encode the shift amount field of Add/sub immediate */
4091 static inline uint32_t
4092 encode_addsub_imm_shift_amount (uint32_t cnt)
4098 /* encode the imm field of Adr instruction */
/* Encode the 21-bit immediate of an ADR instruction: the low two bits go
   to immlo (bits 30:29), the remaining 19 bits to immhi (bits 23:5).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = (imm & 0x3) << 29;		/* [1:0]  -> [30:29] */
  uint32_t immhi = (imm & (0x7ffff << 2)) << 3;	/* [20:2] -> [23:5]  */
  return immlo | immhi;
}
4106 /* encode the immediate field of Move wide immediate */
4107 static inline uint32_t
4108 encode_movw_imm (uint32_t imm)
4113 /* encode the 26-bit offset of unconditional branch */
/* Encode the 26-bit word offset of an unconditional branch (B/BL),
   occupying instruction bits [25:0].  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t mask = (1u << 26) - 1;
  return ofs & mask;
}
4120 /* encode the 19-bit offset of conditional branch and compare & branch */
/* Encode the 19-bit word offset of a conditional branch or
   compare-and-branch instruction, occupying bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t mask = (1u << 19) - 1;
  return (ofs & mask) << 5;
}
4127 /* encode the 19-bit offset of ld literal */
/* Encode the 19-bit word offset of a load-literal instruction,
   occupying bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t mask = (1u << 19) - 1;
  return (ofs & mask) << 5;
}
4134 /* Encode the 14-bit offset of test & branch. */
/* Encode the 14-bit word offset of a test-and-branch instruction
   (TBZ/TBNZ), occupying bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t mask = (1u << 14) - 1;
  return (ofs & mask) << 5;
}
4141 /* Encode the 16-bit imm field of svc/hvc/smc. */
4142 static inline uint32_t
4143 encode_svc_imm (uint32_t imm)
4148 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
/* Flip an add(s) opcode to the corresponding sub(s) opcode, or vice
   versa, by toggling the op bit (bit 30).  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t op_bit = 1u << 30;
  return opcode ^ op_bit;
}
/* Turn a MOVZ/MOVN-class opcode into MOVZ by setting opc bit 30.
   Idempotent on opcodes that are already MOVZ.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode | movz_bit;
}
/* Turn a MOVZ/MOVN-class opcode into MOVN by clearing opc bit 30.
   Idempotent on opcodes that are already MOVN.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode & ~movz_bit;
}
4167 /* Overall per-instruction processing. */
4169 /* We need to be able to fix up arbitrary expressions in some statements.
4170 This is so that we can handle symbols that are an arbitrary distance from
4171 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4172 which returns part of an address in a form which will be valid for
4173 a data instruction. We do this by pushing the expression into a symbol
4174 in the expr_section, and creating a fix for that. */
4177 fix_new_aarch64 (fragS * frag,
4179 short int size, expressionS * exp, int pc_rel, int reloc)
/* Simple expressions are fixed up directly; complex ones are pushed into
   an expr-section symbol first (see the comment above this function).  */
4189 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4193 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4200 /* Diagnostics on operands errors. */
4202 /* By default, output verbose error message.
4203 Disable the verbose error message by -mno-verbose-error. */
4204 static int verbose_error_p = 1;
4206 #ifdef DEBUG_AARCH64
4207 /* N.B. this is only for the purpose of debugging. */
/* Names indexed by enum aarch64_operand_error_kind, used by DEBUG_TRACE
   output; keep in the same order as the enum.  */
4208 const char* operand_mismatch_kind_names[] =
4211 "AARCH64_OPDE_RECOVERABLE",
4212 "AARCH64_OPDE_SYNTAX_ERROR",
4213 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
4214 "AARCH64_OPDE_INVALID_VARIANT",
4215 "AARCH64_OPDE_OUT_OF_RANGE",
4216 "AARCH64_OPDE_UNALIGNED",
4217 "AARCH64_OPDE_REG_LIST",
4218 "AARCH64_OPDE_OTHER_ERROR",
4220 #endif /* DEBUG_AARCH64 */
4222 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
4224 When multiple errors of different kinds are found in the same assembly
4225 line, only the error of the highest severity will be picked up for
4226 issuing the diagnostics. */
4228 static inline bfd_boolean
4229 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
4230 enum aarch64_operand_error_kind rhs)
/* The comparison relies on the enumerators being declared in increasing
   order of severity; the asserts document and enforce that ordering.  */
4232 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
4233 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
4234 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
4235 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
4236 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
4237 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
4238 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
4239 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
4243 /* Helper routine to get the mnemonic name from the assembly instruction
4244 line; should only be called for the diagnosis purpose, as there is
4245 string copy operation involved, which may affect the runtime
4246 performance if used in elsewhere. */
4249 get_mnemonic_name (const char *str)
/* Returns a pointer to static storage; not reentrant, diagnosis-only.  */
4251 static char mnemonic[32];
4254 /* Copy up to 31 bytes and assume that the full mnemonic is included.  */
4255 strncpy (mnemonic, str, 31);
4256 mnemonic[31] = '\0';
4258 /* Scan up to the end of the mnemonic, which must end in white space,
4259 '.', or end of string. */
4260 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
4265 /* Append '...' to the truncated long name. */
4266 if (ptr - mnemonic == 31)
4267 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
4273 reset_aarch64_instruction (aarch64_instruction *instruction)
/* Zero the whole structure, then mark the reloc slot as unused so later
   code can tell "no reloc" apart from reloc type 0.  */
4275 memset (instruction, '\0', sizeof (aarch64_instruction));
4276 instruction->reloc.type = BFD_RELOC_UNUSED;
4279 /* Data structures storing one user error in the assembly code related to
4282 struct operand_error_record
4284 const aarch64_opcode *opcode; /* Template the error was recorded against.  */
4285 aarch64_operand_error detail; /* Kind, operand index and message.  */
4286 struct operand_error_record *next; /* Singly-linked list link.  */
4289 typedef struct operand_error_record operand_error_record;
4291 struct operand_errors
4293 operand_error_record *head;
4294 operand_error_record *tail;
4297 typedef struct operand_errors operand_errors;
4299 /* Top-level data structure reporting user errors for the current line of
4301 The way md_assemble works is that all opcodes sharing the same mnemonic
4302 name are iterated to find a match to the assembly line. In this data
4303 structure, each of the such opcodes will have one operand_error_record
4304 allocated and inserted. In other words, excessive errors related with
4305 a single opcode are disregarded. */
4306 operand_errors operand_error_report;
4308 /* Free record nodes. */
4309 static operand_error_record *free_opnd_error_record_nodes = NULL;
4311 /* Initialize the data structure that stores the operand mismatch
4312 information on assembling one line of the assembly code. */
4314 init_operand_error_report (void)
/* Recycle any records left over from the previous line onto the free list
   rather than freeing them, then reset the report to empty.  */
4316 if (operand_error_report.head != NULL)
4318 gas_assert (operand_error_report.tail != NULL);
4319 operand_error_report.tail->next = free_opnd_error_record_nodes;
4320 free_opnd_error_record_nodes = operand_error_report.head;
4321 operand_error_report.head = NULL;
4322 operand_error_report.tail = NULL;
4325 gas_assert (operand_error_report.tail == NULL);
4328 /* Return TRUE if some operand error has been recorded during the
4329 parsing of the current assembly line using the opcode *OPCODE;
4330 otherwise return FALSE. */
4331 static inline bfd_boolean
4332 opcode_has_operand_error_p (const aarch64_opcode *opcode)
/* Records are inserted at the head, so checking only the head record is
   sufficient: a record for OPCODE, if any, is the most recent one.  */
4334 operand_error_record *record = operand_error_report.head;
4335 return record && record->opcode == opcode;
4338 /* Add the error record *NEW_RECORD to operand_error_report. The record's
4339 OPCODE field is initialized with OPCODE.
4340 N.B. only one record for each opcode, i.e. the maximum of one error is
4341 recorded for each instruction template. */
4344 add_operand_error_record (const operand_error_record* new_record)
4346 const aarch64_opcode *opcode = new_record->opcode;
4347 operand_error_record* record = operand_error_report.head;
4349 /* The record may have been created for this opcode. If not, we need
4351 if (! opcode_has_operand_error_p (opcode))
4353 /* Get one empty record. */
4354 if (free_opnd_error_record_nodes == NULL)
4356 record = XNEW (operand_error_record);
/* Reuse a node from the free list when one is available.  */
4360 record = free_opnd_error_record_nodes;
4361 free_opnd_error_record_nodes = record->next;
4363 record->opcode = opcode;
4364 /* Insert at the head. */
4365 record->next = operand_error_report.head;
4366 operand_error_report.head = record;
4367 if (operand_error_report.tail == NULL)
4368 operand_error_report.tail = record;
4370 else if (record->detail.kind != AARCH64_OPDE_NIL
4371 && record->detail.index <= new_record->detail.index
4372 && operand_error_higher_severity_p (record->detail.kind,
4373 new_record->detail.kind))
4375 /* In the case of multiple errors found on operands related with a
4376 single opcode, only record the error of the leftmost operand and
4377 only if the error is of higher severity. */
4378 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
4379 " the existing error %s on operand %d",
4380 operand_mismatch_kind_names[new_record->detail.kind],
4381 new_record->detail.index,
4382 operand_mismatch_kind_names[record->detail.kind],
4383 record->detail.index);
/* Overwrite (or initialize) the per-opcode record's detail.  */
4387 record->detail = new_record->detail;
4391 record_operand_error_info (const aarch64_opcode *opcode,
4392 aarch64_operand_error *error_info)
/* Build a record on the stack and delegate to add_operand_error_record,
   which copies what it needs.  */
4394 operand_error_record record;
4395 record.opcode = opcode;
4396 record.detail = *error_info;
4397 add_operand_error_record (&record);
4400 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4401 error message *ERROR, for operand IDX (count from 0). */
4404 record_operand_error (const aarch64_opcode *opcode, int idx,
4405 enum aarch64_operand_error_kind kind,
/* Convenience wrapper: zero-initialize the detail struct, fill in the
   kind/index/message fields and record it.  */
4408 aarch64_operand_error info;
4409 memset(&info, 0, sizeof (info));
4413 info.non_fatal = FALSE;
4414 record_operand_error_info (opcode, &info);
4418 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4419 enum aarch64_operand_error_kind kind,
4420 const char* error, const int *extra_data)
4422 aarch64_operand_error info;
/* EXTRA_DATA must supply at least three ints; they are copied into the
   detail's data[] array for message formatting later.  */
4426 info.data[0] = extra_data[0];
4427 info.data[1] = extra_data[1];
4428 info.data[2] = extra_data[2];
4429 info.non_fatal = FALSE;
4430 record_operand_error_info (opcode, &info);
4434 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4435 const char* error, int lower_bound,
/* Package the inclusive range bounds as extra data for the generic
   with-data recorder.  */
4438 int data[3] = {lower_bound, upper_bound, 0};
4439 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4443 /* Remove the operand error record for *OPCODE. */
4444 static void ATTRIBUTE_UNUSED
4445 remove_operand_error_record (const aarch64_opcode *opcode)
4447 if (opcode_has_operand_error_p (opcode))
4449 operand_error_record* record = operand_error_report.head;
4450 gas_assert (record != NULL && operand_error_report.tail != NULL);
4451 operand_error_report.head = record->next;
4452 record->next = free_opnd_error_record_nodes;
4453 free_opnd_error_record_nodes = record;
4454 if (operand_error_report.head == NULL)
4456 gas_assert (operand_error_report.tail == record);
4457 operand_error_report.tail = NULL;
4462 /* Given the instruction in *INSTR, return the index of the best matched
4463 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4465 Return -1 if there is no qualifier sequence; return the first match
4466 if there is multiple matches found. */
4469 find_best_match (const aarch64_inst *instr,
4470 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4472 int i, num_opnds, max_num_matched, idx;
4474 num_opnds = aarch64_num_of_operands (instr->opcode);
4477 DEBUG_TRACE ("no operand");
4481 max_num_matched = 0;
4484 /* For each pattern. */
4485 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4488 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4490 /* Most opcodes has much fewer patterns in the list. */
4491 if (empty_qualifier_sequence_p (qualifiers))
4493 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4497 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4498 if (*qualifiers == instr->operands[j].qualifier)
4501 if (num_matched > max_num_matched)
4503 max_num_matched = num_matched;
4508 DEBUG_TRACE ("return with %d", idx);
4512 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4513 corresponding operands in *INSTR. */
4516 assign_qualifier_sequence (aarch64_inst *instr,
4517 const aarch64_opnd_qualifier_t *qualifiers)
4520 int num_opnds = aarch64_num_of_operands (instr->opcode);
4521 gas_assert (num_opnds);
4522 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4523 instr->operands[i].qualifier = *qualifiers;
4526 /* Print operands for the diagnosis purpose. */
4529 print_operands (char *buf, const aarch64_opcode *opcode,
4530 const aarch64_opnd_info *opnds)
4534 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4538 /* We regard the opcode operand info more, however we also look into
4539 the inst->operands to support the disassembling of the optional
4541 The two operand code should be the same in all cases, apart from
4542 when the operand can be optional. */
4543 if (opcode->operands[i] == AARCH64_OPND_NIL
4544 || opnds[i].type == AARCH64_OPND_NIL)
4547 /* Generate the operand string in STR. */
4548 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
4553 strcat (buf, i == 0 ? " " : ", ");
4555 /* Append the operand string. */
/* Send to stderr a string as information, prefixed with the current
   file/line position (like as_bad/as_warn but purely informational).  */

static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  file = as_where (&line);
  if (file)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  /* Always pair va_start with va_end.  */
  va_end (args);
  (void) putc ('\n', stderr);
}
4584 /* Output one operand error record. */
4587 output_operand_error_record (const operand_error_record *record, char *str)
4589 const aarch64_operand_error *detail = &record->detail;
4590 int idx = detail->index;
4591 const aarch64_opcode *opcode = record->opcode;
4592 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4593 : AARCH64_OPND_NIL);
4595 typedef void (*handler_t)(const char *format, ...);
4596 handler_t handler = detail->non_fatal ? as_warn : as_bad;
4598 switch (detail->kind)
4600 case AARCH64_OPDE_NIL:
4603 case AARCH64_OPDE_SYNTAX_ERROR:
4604 case AARCH64_OPDE_RECOVERABLE:
4605 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4606 case AARCH64_OPDE_OTHER_ERROR:
4607 /* Use the prepared error message if there is, otherwise use the
4608 operand description string to describe the error. */
4609 if (detail->error != NULL)
4612 handler (_("%s -- `%s'"), detail->error, str);
4614 handler (_("%s at operand %d -- `%s'"),
4615 detail->error, idx + 1, str);
4619 gas_assert (idx >= 0);
4620 handler (_("operand %d must be %s -- `%s'"), idx + 1,
4621 aarch64_get_operand_desc (opd_code), str);
4625 case AARCH64_OPDE_INVALID_VARIANT:
4626 handler (_("operand mismatch -- `%s'"), str);
4627 if (verbose_error_p)
4629 /* We will try to correct the erroneous instruction and also provide
4630 more information e.g. all other valid variants.
4632 The string representation of the corrected instruction and other
4633 valid variants are generated by
4635 1) obtaining the intermediate representation of the erroneous
4637 2) manipulating the IR, e.g. replacing the operand qualifier;
4638 3) printing out the instruction by calling the printer functions
4639 shared with the disassembler.
4641 The limitation of this method is that the exact input assembly
4642 line cannot be accurately reproduced in some cases, for example an
4643 optional operand present in the actual assembly line will be
4644 omitted in the output; likewise for the optional syntax rules,
4645 e.g. the # before the immediate. Another limitation is that the
4646 assembly symbols and relocation operations in the assembly line
4647 currently cannot be printed out in the error report. Last but not
4648 least, when there is other error(s) co-exist with this error, the
4649 'corrected' instruction may be still incorrect, e.g. given
4650 'ldnp h0,h1,[x0,#6]!'
4651 this diagnosis will provide the version:
4652 'ldnp s0,s1,[x0,#6]!'
4653 which is still not right. */
4654 size_t len = strlen (get_mnemonic_name (str));
4658 aarch64_inst *inst_base = &inst.base;
4659 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4662 reset_aarch64_instruction (&inst);
4663 inst_base->opcode = opcode;
4665 /* Reset the error report so that there is no side effect on the
4666 following operand parsing. */
4667 init_operand_error_report ();
4670 result = parse_operands (str + len, opcode)
4671 && programmer_friendly_fixup (&inst);
4672 gas_assert (result);
4673 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4674 NULL, NULL, insn_sequence);
4675 gas_assert (!result);
4677 /* Find the most matched qualifier sequence. */
4678 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4679 gas_assert (qlf_idx > -1);
4681 /* Assign the qualifiers. */
4682 assign_qualifier_sequence (inst_base,
4683 opcode->qualifiers_list[qlf_idx]);
4685 /* Print the hint. */
4686 output_info (_(" did you mean this?"));
4687 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4688 print_operands (buf, opcode, inst_base->operands);
4689 output_info (_(" %s"), buf);
4691 /* Print out other variant(s) if there is any. */
4693 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4694 output_info (_(" other valid variant(s):"));
4696 /* For each pattern. */
4697 qualifiers_list = opcode->qualifiers_list;
4698 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4700 /* Most opcodes has much fewer patterns in the list.
4701 First NIL qualifier indicates the end in the list. */
4702 if (empty_qualifier_sequence_p (*qualifiers_list))
4707 /* Mnemonics name. */
4708 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4710 /* Assign the qualifiers. */
4711 assign_qualifier_sequence (inst_base, *qualifiers_list);
4713 /* Print instruction. */
4714 print_operands (buf, opcode, inst_base->operands);
4716 output_info (_(" %s"), buf);
4722 case AARCH64_OPDE_UNTIED_OPERAND:
4723 handler (_("operand %d must be the same register as operand 1 -- `%s'"),
4724 detail->index + 1, str);
4727 case AARCH64_OPDE_OUT_OF_RANGE:
4728 if (detail->data[0] != detail->data[1])
4729 handler (_("%s out of range %d to %d at operand %d -- `%s'"),
4730 detail->error ? detail->error : _("immediate value"),
4731 detail->data[0], detail->data[1], idx + 1, str);
4733 handler (_("%s must be %d at operand %d -- `%s'"),
4734 detail->error ? detail->error : _("immediate value"),
4735 detail->data[0], idx + 1, str);
4738 case AARCH64_OPDE_REG_LIST:
4739 if (detail->data[0] == 1)
4740 handler (_("invalid number of registers in the list; "
4741 "only 1 register is expected at operand %d -- `%s'"),
4744 handler (_("invalid number of registers in the list; "
4745 "%d registers are expected at operand %d -- `%s'"),
4746 detail->data[0], idx + 1, str);
4749 case AARCH64_OPDE_UNALIGNED:
4750 handler (_("immediate value must be a multiple of "
4751 "%d at operand %d -- `%s'"),
4752 detail->data[0], idx + 1, str);
4761 /* Process and output the error message about the operand mismatching.
4763 When this function is called, the operand error information had
4764 been collected for an assembly line and there will be multiple
4765 errors in the case of multiple instruction templates; output the
4766 error message that most closely describes the problem.
4768 The errors to be printed can be filtered on printing all errors
4769 or only non-fatal errors. This distinction has to be made because
4770 the error buffer may already be filled with fatal errors we don't want to
4771 print due to the different instruction templates. */
4774 output_operand_error_report (char *str, bfd_boolean non_fatal_only)
4776 int largest_error_pos;
4777 const char *msg = NULL;
4778 enum aarch64_operand_error_kind kind;
4779 operand_error_record *curr;
4780 operand_error_record *head = operand_error_report.head;
4781 operand_error_record *record = NULL;
4783 /* No error to report. */
4787 gas_assert (head != NULL && operand_error_report.tail != NULL);
4789 /* Only one error. */
4790 if (head == operand_error_report.tail)
4792 /* If the only error is a non-fatal one and we don't want to print it,
4794 if (!non_fatal_only || head->detail.non_fatal)
4796 DEBUG_TRACE ("single opcode entry with error kind: %s",
4797 operand_mismatch_kind_names[head->detail.kind]);
4798 output_operand_error_record (head, str);
4803 /* Find the error kind of the highest severity. */
4804 DEBUG_TRACE ("multiple opcode entries with error kind");
4805 kind = AARCH64_OPDE_NIL;
4806 for (curr = head; curr != NULL; curr = curr->next)
4808 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4809 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4810 if (operand_error_higher_severity_p (curr->detail.kind, kind)
4811 && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
4812 kind = curr->detail.kind;
4815 gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);
4817 /* Pick up one of errors of KIND to report. */
4818 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4819 for (curr = head; curr != NULL; curr = curr->next)
4821 /* If we don't want to print non-fatal errors then don't consider them
4823 if (curr->detail.kind != kind
4824 || (non_fatal_only && !curr->detail.non_fatal))
4826 /* If there are multiple errors, pick up the one with the highest
4827 mismatching operand index. In the case of multiple errors with
4828 the equally highest operand index, pick up the first one or the
4829 first one with non-NULL error message. */
4830 if (curr->detail.index > largest_error_pos
4831 || (curr->detail.index == largest_error_pos && msg == NULL
4832 && curr->detail.error != NULL))
4834 largest_error_pos = curr->detail.index;
4836 msg = record->detail.error;
4840 /* The way errors are collected in the back-end is a bit non-intuitive. But
4841 essentially, because each operand template is tried recursively you may
4842 always have errors collected from the previous tried OPND. These are
4843 usually skipped if there is one successful match. However now with the
4844 non-fatal errors we have to ignore those previously collected hard errors
4845 when we're only interested in printing the non-fatal ones. This condition
4846 prevents us from printing errors that are not appropriate, since we did
4847 match a condition, but it also has warnings that it wants to print. */
4848 if (non_fatal_only && !record)
4851 gas_assert (largest_error_pos != -2 && record != NULL);
4852 DEBUG_TRACE ("Pick up error kind %s to report",
4853 operand_mismatch_kind_names[record->detail.kind]);
4856 output_operand_error_record (record, str);
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *where = (unsigned char *) buf;
  /* Store byte by byte so the result is little-endian regardless of the
     host's endianness.  */
  where[0] = insn;
  where[1] = insn >> 8;
  where[2] = insn >> 16;
  where[3] = insn >> 24;
}
4871 get_aarch64_insn (char *buf)
4873 unsigned char *where = (unsigned char *) buf;
4875 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4880 output_inst (struct aarch64_inst *new_inst)
4884 to = frag_more (INSN_SIZE);
4886 frag_now->tc_frag_data.recorded = 1;
4888 put_aarch64_insn (to, inst.base.value);
4890 if (inst.reloc.type != BFD_RELOC_UNUSED)
4892 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4893 INSN_SIZE, &inst.reloc.exp,
4896 DEBUG_TRACE ("Prepared relocation fix up");
4897 /* Don't check the addend value against the instruction size,
4898 that's the job of our code in md_apply_fix(). */
4899 fixp->fx_no_overflow = 1;
4900 if (new_inst != NULL)
4901 fixp->tc_fix_data.inst = new_inst;
4902 if (aarch64_gas_internal_fixup_p ())
4904 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4905 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4906 fixp->fx_addnumber = inst.reloc.flags;
4910 dwarf2_emit_insn (INSN_SIZE);
4913 /* Link together opcodes of the same name. */
4917 aarch64_opcode *opcode;
4918 struct templates *next;
4921 typedef struct templates templates;
4924 lookup_mnemonic (const char *start, int len)
4926 templates *templ = NULL;
4928 templ = hash_find_n (aarch64_ops_hsh, start, len);
4932 /* Subroutine of md_assemble, responsible for looking up the primary
4933 opcode from the mnemonic the user wrote. STR points to the
4934 beginning of the mnemonic. */
4937 opcode_lookup (char **str)
4939 char *end, *base, *dot;
4940 const aarch64_cond *cond;
4944 /* Scan up to the end of the mnemonic, which must end in white space,
4945 '.', or end of string. */
4947 for (base = end = *str; is_part_of_name(*end); end++)
4948 if (*end == '.' && !dot)
4951 if (end == base || dot == base)
4954 inst.cond = COND_ALWAYS;
4956 /* Handle a possible condition. */
4959 cond = hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
4962 inst.cond = cond->value;
4978 if (inst.cond == COND_ALWAYS)
4980 /* Look for unaffixed mnemonic. */
4981 return lookup_mnemonic (base, len);
4985 /* append ".c" to mnemonic if conditional */
4986 memcpy (condname, base, len);
4987 memcpy (condname + len, ".c", 2);
4990 return lookup_mnemonic (base, len);
4996 /* Internal helper routine converting a vector_type_el structure *VECTYPE
4997 to a corresponding operand qualifier. */
4999 static inline aarch64_opnd_qualifier_t
5000 vectype_to_qualifier (const struct vector_type_el *vectype)
5002 /* Element size in bytes indexed by vector_el_type. */
5003 const unsigned char ele_size[5]
5005 const unsigned int ele_base [5] =
5007 AARCH64_OPND_QLF_V_4B,
5008 AARCH64_OPND_QLF_V_2H,
5009 AARCH64_OPND_QLF_V_2S,
5010 AARCH64_OPND_QLF_V_1D,
5011 AARCH64_OPND_QLF_V_1Q
5014 if (!vectype->defined || vectype->type == NT_invtype)
5015 goto vectype_conversion_fail;
5017 if (vectype->type == NT_zero)
5018 return AARCH64_OPND_QLF_P_Z;
5019 if (vectype->type == NT_merge)
5020 return AARCH64_OPND_QLF_P_M;
5022 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
5024 if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
5026 /* Special case S_4B. */
5027 if (vectype->type == NT_b && vectype->width == 4)
5028 return AARCH64_OPND_QLF_S_4B;
5030 /* Vector element register. */
5031 return AARCH64_OPND_QLF_S_B + vectype->type;
5035 /* Vector register. */
5036 int reg_size = ele_size[vectype->type] * vectype->width;
5039 if (reg_size != 16 && reg_size != 8 && reg_size != 4)
5040 goto vectype_conversion_fail;
5042 /* The conversion is by calculating the offset from the base operand
5043 qualifier for the vector type. The operand qualifiers are regular
5044 enough that the offset can established by shifting the vector width by
5045 a vector-type dependent amount. */
5047 if (vectype->type == NT_b)
5049 else if (vectype->type == NT_h || vectype->type == NT_s)
5051 else if (vectype->type >= NT_d)
5056 offset = ele_base [vectype->type] + (vectype->width >> shift);
5057 gas_assert (AARCH64_OPND_QLF_V_4B <= offset
5058 && offset <= AARCH64_OPND_QLF_V_1Q);
5062 vectype_conversion_fail:
5063 first_error (_("bad vector arrangement type"));
5064 return AARCH64_OPND_QLF_NIL;
5067 /* Process an optional operand that is found omitted from the assembly line.
5068 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
5069 instruction's opcode entry while IDX is the index of this omitted operand.
5073 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
5074 int idx, aarch64_opnd_info *operand)
5076 aarch64_insn default_value = get_optional_operand_default_value (opcode);
5077 gas_assert (optional_operand_p (opcode, idx));
5078 gas_assert (!operand->present);
5082 case AARCH64_OPND_Rd:
5083 case AARCH64_OPND_Rn:
5084 case AARCH64_OPND_Rm:
5085 case AARCH64_OPND_Rt:
5086 case AARCH64_OPND_Rt2:
5087 case AARCH64_OPND_Rs:
5088 case AARCH64_OPND_Ra:
5089 case AARCH64_OPND_Rt_SYS:
5090 case AARCH64_OPND_Rd_SP:
5091 case AARCH64_OPND_Rn_SP:
5092 case AARCH64_OPND_Rm_SP:
5093 case AARCH64_OPND_Fd:
5094 case AARCH64_OPND_Fn:
5095 case AARCH64_OPND_Fm:
5096 case AARCH64_OPND_Fa:
5097 case AARCH64_OPND_Ft:
5098 case AARCH64_OPND_Ft2:
5099 case AARCH64_OPND_Sd:
5100 case AARCH64_OPND_Sn:
5101 case AARCH64_OPND_Sm:
5102 case AARCH64_OPND_Va:
5103 case AARCH64_OPND_Vd:
5104 case AARCH64_OPND_Vn:
5105 case AARCH64_OPND_Vm:
5106 case AARCH64_OPND_VdD1:
5107 case AARCH64_OPND_VnD1:
5108 operand->reg.regno = default_value;
5111 case AARCH64_OPND_Ed:
5112 case AARCH64_OPND_En:
5113 case AARCH64_OPND_Em:
5114 case AARCH64_OPND_Em16:
5115 case AARCH64_OPND_SM3_IMM2:
5116 operand->reglane.regno = default_value;
5119 case AARCH64_OPND_IDX:
5120 case AARCH64_OPND_BIT_NUM:
5121 case AARCH64_OPND_IMMR:
5122 case AARCH64_OPND_IMMS:
5123 case AARCH64_OPND_SHLL_IMM:
5124 case AARCH64_OPND_IMM_VLSL:
5125 case AARCH64_OPND_IMM_VLSR:
5126 case AARCH64_OPND_CCMP_IMM:
5127 case AARCH64_OPND_FBITS:
5128 case AARCH64_OPND_UIMM4:
5129 case AARCH64_OPND_UIMM3_OP1:
5130 case AARCH64_OPND_UIMM3_OP2:
5131 case AARCH64_OPND_IMM:
5132 case AARCH64_OPND_IMM_2:
5133 case AARCH64_OPND_WIDTH:
5134 case AARCH64_OPND_UIMM7:
5135 case AARCH64_OPND_NZCV:
5136 case AARCH64_OPND_SVE_PATTERN:
5137 case AARCH64_OPND_SVE_PRFOP:
5138 operand->imm.value = default_value;
5141 case AARCH64_OPND_SVE_PATTERN_SCALED:
5142 operand->imm.value = default_value;
5143 operand->shifter.kind = AARCH64_MOD_MUL;
5144 operand->shifter.amount = 1;
5147 case AARCH64_OPND_EXCEPTION:
5148 inst.reloc.type = BFD_RELOC_UNUSED;
5151 case AARCH64_OPND_BARRIER_ISB:
5152 operand->barrier = aarch64_barrier_options + default_value;
5159 /* Process the relocation type for move wide instructions.
5160 Return TRUE on success; otherwise return FALSE. */
5163 process_movw_reloc_info (void)
5168 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
5170 if (inst.base.opcode->op == OP_MOVK)
5171 switch (inst.reloc.type)
5173 case BFD_RELOC_AARCH64_MOVW_G0_S:
5174 case BFD_RELOC_AARCH64_MOVW_G1_S:
5175 case BFD_RELOC_AARCH64_MOVW_G2_S:
5176 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
5177 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
5178 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
5179 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
5180 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5181 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5182 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5183 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5185 (_("the specified relocation type is not allowed for MOVK"));
5191 switch (inst.reloc.type)
5193 case BFD_RELOC_AARCH64_MOVW_G0:
5194 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5195 case BFD_RELOC_AARCH64_MOVW_G0_S:
5196 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5197 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
5198 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
5199 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5200 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5201 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5202 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
5203 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5204 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5205 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5208 case BFD_RELOC_AARCH64_MOVW_G1:
5209 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5210 case BFD_RELOC_AARCH64_MOVW_G1_S:
5211 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5212 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
5213 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
5214 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5215 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5216 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5217 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
5218 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
5219 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5220 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5223 case BFD_RELOC_AARCH64_MOVW_G2:
5224 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5225 case BFD_RELOC_AARCH64_MOVW_G2_S:
5226 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
5227 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
5228 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
5229 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5232 set_fatal_syntax_error
5233 (_("the specified relocation type is not allowed for 32-bit "
5239 case BFD_RELOC_AARCH64_MOVW_G3:
5240 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
5243 set_fatal_syntax_error
5244 (_("the specified relocation type is not allowed for 32-bit "
5251 /* More cases should be added when more MOVW-related relocation types
5252 are supported in GAS. */
5253 gas_assert (aarch64_gas_internal_fixup_p ());
5254 /* The shift amount should have already been set by the parser. */
5257 inst.base.operands[1].shifter.amount = shift;
/* A primitive log calculator: return log2 of SIZE for SIZE in
   {1, 2, 4, 8, 16}; abort (via gas_assert) on any other value.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* Lookup table indexed by SIZE - 1; (unsigned char) -1 marks sizes that
     are not powers of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  if (size > 16)
    {
      /* Guard against out-of-bounds table access.  */
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5277 /* Determine and return the real reloc type code for an instruction
5278 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
5280 static inline bfd_reloc_code_real_type
5281 ldst_lo12_determine_real_reloc_type (void)
5284 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
5285 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
5287 const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
5289 BFD_RELOC_AARCH64_LDST8_LO12,
5290 BFD_RELOC_AARCH64_LDST16_LO12,
5291 BFD_RELOC_AARCH64_LDST32_LO12,
5292 BFD_RELOC_AARCH64_LDST64_LO12,
5293 BFD_RELOC_AARCH64_LDST128_LO12
5296 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
5297 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
5298 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
5299 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
5300 BFD_RELOC_AARCH64_NONE
5303 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
5304 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
5305 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
5306 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
5307 BFD_RELOC_AARCH64_NONE
5310 BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
5311 BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
5312 BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
5313 BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
5314 BFD_RELOC_AARCH64_NONE
5317 BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
5318 BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
5319 BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
5320 BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
5321 BFD_RELOC_AARCH64_NONE
5325 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5326 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
5328 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
5330 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
5332 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
5333 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
5335 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
5337 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
5339 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
5341 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
5342 if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
5343 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
5344 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
5345 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
5346 gas_assert (logsz <= 3);
5348 gas_assert (logsz <= 4);
5350 /* In reloc.c, these pseudo relocation types should be defined in similar
5351 order as above reloc_ldst_lo12 array. Because the array index calculation
5352 below relies on this. */
5353 return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
5356 /* Check whether a register list REGINFO is valid. The registers must be
5357 numbered in increasing order (modulo 32), in increments of one or two.
5359 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5362 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5365 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5367 uint32_t i, nb_regs, prev_regno, incr;
5369 nb_regs = 1 + (reginfo & 0x3);
5371 prev_regno = reginfo & 0x1f;
5372 incr = accept_alternate ? 2 : 1;
5374 for (i = 1; i < nb_regs; ++i)
5376 uint32_t curr_regno;
5378 curr_regno = reginfo & 0x1f;
5379 if (curr_regno != ((prev_regno + incr) & 0x1f))
5381 prev_regno = curr_regno;
5387 /* Generic instruction operand parser. This does no encoding and no
5388 semantic validation; it merely squirrels values away in the inst
5389 structure. Returns TRUE or FALSE depending on whether the
5390 specified grammar matched. */
5393 parse_operands (char *str, const aarch64_opcode *opcode)
5396 char *backtrack_pos = 0;
5397 const enum aarch64_opnd *operands = opcode->operands;
5398 aarch64_reg_type imm_reg_type;
5401 skip_whitespace (str);
5403 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5404 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5406 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5408 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5411 const reg_entry *reg;
5412 int comma_skipped_p = 0;
5413 aarch64_reg_type rtype;
5414 struct vector_type_el vectype;
5415 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5416 aarch64_opnd_info *info = &inst.base.operands[i];
5417 aarch64_reg_type reg_type;
5419 DEBUG_TRACE ("parse operand %d", i);
5421 /* Assign the operand code. */
5422 info->type = operands[i];
5424 if (optional_operand_p (opcode, i))
5426 /* Remember where we are in case we need to backtrack. */
5427 gas_assert (!backtrack_pos);
5428 backtrack_pos = str;
5431 /* Expect comma between operands; the backtrack mechanism will take
5432 care of cases of omitted optional operand. */
5433 if (i > 0 && ! skip_past_char (&str, ','))
5435 set_syntax_error (_("comma expected between operands"));
5439 comma_skipped_p = 1;
5441 switch (operands[i])
5443 case AARCH64_OPND_Rd:
5444 case AARCH64_OPND_Rn:
5445 case AARCH64_OPND_Rm:
5446 case AARCH64_OPND_Rt:
5447 case AARCH64_OPND_Rt2:
5448 case AARCH64_OPND_Rs:
5449 case AARCH64_OPND_Ra:
5450 case AARCH64_OPND_Rt_SYS:
5451 case AARCH64_OPND_PAIRREG:
5452 case AARCH64_OPND_SVE_Rm:
5453 po_int_reg_or_fail (REG_TYPE_R_Z);
5456 case AARCH64_OPND_Rd_SP:
5457 case AARCH64_OPND_Rn_SP:
5458 case AARCH64_OPND_SVE_Rn_SP:
5459 case AARCH64_OPND_Rm_SP:
5460 po_int_reg_or_fail (REG_TYPE_R_SP);
5463 case AARCH64_OPND_Rm_EXT:
5464 case AARCH64_OPND_Rm_SFT:
5465 po_misc_or_fail (parse_shifter_operand
5466 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5468 : SHIFTED_LOGIC_IMM)));
5469 if (!info->shifter.operator_present)
5471 /* Default to LSL if not present. Libopcodes prefers shifter
5472 kind to be explicit. */
5473 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5474 info->shifter.kind = AARCH64_MOD_LSL;
5475 /* For Rm_EXT, libopcodes will carry out further check on whether
5476 or not stack pointer is used in the instruction (Recall that
5477 "the extend operator is not optional unless at least one of
5478 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5482 case AARCH64_OPND_Fd:
5483 case AARCH64_OPND_Fn:
5484 case AARCH64_OPND_Fm:
5485 case AARCH64_OPND_Fa:
5486 case AARCH64_OPND_Ft:
5487 case AARCH64_OPND_Ft2:
5488 case AARCH64_OPND_Sd:
5489 case AARCH64_OPND_Sn:
5490 case AARCH64_OPND_Sm:
5491 case AARCH64_OPND_SVE_VZn:
5492 case AARCH64_OPND_SVE_Vd:
5493 case AARCH64_OPND_SVE_Vm:
5494 case AARCH64_OPND_SVE_Vn:
5495 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5496 if (val == PARSE_FAIL)
5498 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5501 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5503 info->reg.regno = val;
5504 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5507 case AARCH64_OPND_SVE_Pd:
5508 case AARCH64_OPND_SVE_Pg3:
5509 case AARCH64_OPND_SVE_Pg4_5:
5510 case AARCH64_OPND_SVE_Pg4_10:
5511 case AARCH64_OPND_SVE_Pg4_16:
5512 case AARCH64_OPND_SVE_Pm:
5513 case AARCH64_OPND_SVE_Pn:
5514 case AARCH64_OPND_SVE_Pt:
5515 reg_type = REG_TYPE_PN;
5518 case AARCH64_OPND_SVE_Za_5:
5519 case AARCH64_OPND_SVE_Za_16:
5520 case AARCH64_OPND_SVE_Zd:
5521 case AARCH64_OPND_SVE_Zm_5:
5522 case AARCH64_OPND_SVE_Zm_16:
5523 case AARCH64_OPND_SVE_Zn:
5524 case AARCH64_OPND_SVE_Zt:
5525 reg_type = REG_TYPE_ZN;
5528 case AARCH64_OPND_Va:
5529 case AARCH64_OPND_Vd:
5530 case AARCH64_OPND_Vn:
5531 case AARCH64_OPND_Vm:
5532 reg_type = REG_TYPE_VN;
5534 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5535 if (val == PARSE_FAIL)
5537 first_error (_(get_reg_expected_msg (reg_type)));
5540 if (vectype.defined & NTA_HASINDEX)
5543 info->reg.regno = val;
5544 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5545 && vectype.type == NT_invtype)
5546 /* Unqualified Pn and Zn registers are allowed in certain
5547 contexts. Rely on F_STRICT qualifier checking to catch
5549 info->qualifier = AARCH64_OPND_QLF_NIL;
5552 info->qualifier = vectype_to_qualifier (&vectype);
5553 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5558 case AARCH64_OPND_VdD1:
5559 case AARCH64_OPND_VnD1:
5560 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5561 if (val == PARSE_FAIL)
5563 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5566 if (vectype.type != NT_d || vectype.index != 1)
5568 set_fatal_syntax_error
5569 (_("the top half of a 128-bit FP/SIMD register is expected"));
5572 info->reg.regno = val;
5573 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5574 here; it is correct for the purpose of encoding/decoding since
5575 only the register number is explicitly encoded in the related
5576 instructions, although this appears a bit hacky. */
5577 info->qualifier = AARCH64_OPND_QLF_S_D;
5580 case AARCH64_OPND_SVE_Zm3_INDEX:
5581 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5582 case AARCH64_OPND_SVE_Zm4_INDEX:
5583 case AARCH64_OPND_SVE_Zn_INDEX:
5584 reg_type = REG_TYPE_ZN;
5585 goto vector_reg_index;
5587 case AARCH64_OPND_Ed:
5588 case AARCH64_OPND_En:
5589 case AARCH64_OPND_Em:
5590 case AARCH64_OPND_Em16:
5591 case AARCH64_OPND_SM3_IMM2:
5592 reg_type = REG_TYPE_VN;
5594 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5595 if (val == PARSE_FAIL)
5597 first_error (_(get_reg_expected_msg (reg_type)));
5600 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5603 info->reglane.regno = val;
5604 info->reglane.index = vectype.index;
5605 info->qualifier = vectype_to_qualifier (&vectype);
5606 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5610 case AARCH64_OPND_SVE_ZnxN:
5611 case AARCH64_OPND_SVE_ZtxN:
5612 reg_type = REG_TYPE_ZN;
5613 goto vector_reg_list;
5615 case AARCH64_OPND_LVn:
5616 case AARCH64_OPND_LVt:
5617 case AARCH64_OPND_LVt_AL:
5618 case AARCH64_OPND_LEt:
5619 reg_type = REG_TYPE_VN;
5621 if (reg_type == REG_TYPE_ZN
5622 && get_opcode_dependent_value (opcode) == 1
5625 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5626 if (val == PARSE_FAIL)
5628 first_error (_(get_reg_expected_msg (reg_type)));
5631 info->reglist.first_regno = val;
5632 info->reglist.num_regs = 1;
5636 val = parse_vector_reg_list (&str, reg_type, &vectype);
5637 if (val == PARSE_FAIL)
5639 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5641 set_fatal_syntax_error (_("invalid register list"));
5644 info->reglist.first_regno = (val >> 2) & 0x1f;
5645 info->reglist.num_regs = (val & 0x3) + 1;
5647 if (operands[i] == AARCH64_OPND_LEt)
5649 if (!(vectype.defined & NTA_HASINDEX))
5651 info->reglist.has_index = 1;
5652 info->reglist.index = vectype.index;
5656 if (vectype.defined & NTA_HASINDEX)
5658 if (!(vectype.defined & NTA_HASTYPE))
5660 if (reg_type == REG_TYPE_ZN)
5661 set_fatal_syntax_error (_("missing type suffix"));
5665 info->qualifier = vectype_to_qualifier (&vectype);
5666 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5670 case AARCH64_OPND_CRn:
5671 case AARCH64_OPND_CRm:
5673 char prefix = *(str++);
5674 if (prefix != 'c' && prefix != 'C')
5677 po_imm_nc_or_fail ();
5680 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5683 info->qualifier = AARCH64_OPND_QLF_CR;
5684 info->imm.value = val;
5688 case AARCH64_OPND_SHLL_IMM:
5689 case AARCH64_OPND_IMM_VLSR:
5690 po_imm_or_fail (1, 64);
5691 info->imm.value = val;
5694 case AARCH64_OPND_CCMP_IMM:
5695 case AARCH64_OPND_SIMM5:
5696 case AARCH64_OPND_FBITS:
5697 case AARCH64_OPND_UIMM4:
5698 case AARCH64_OPND_UIMM3_OP1:
5699 case AARCH64_OPND_UIMM3_OP2:
5700 case AARCH64_OPND_IMM_VLSL:
5701 case AARCH64_OPND_IMM:
5702 case AARCH64_OPND_IMM_2:
5703 case AARCH64_OPND_WIDTH:
5704 case AARCH64_OPND_SVE_INV_LIMM:
5705 case AARCH64_OPND_SVE_LIMM:
5706 case AARCH64_OPND_SVE_LIMM_MOV:
5707 case AARCH64_OPND_SVE_SHLIMM_PRED:
5708 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5709 case AARCH64_OPND_SVE_SHRIMM_PRED:
5710 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5711 case AARCH64_OPND_SVE_SIMM5:
5712 case AARCH64_OPND_SVE_SIMM5B:
5713 case AARCH64_OPND_SVE_SIMM6:
5714 case AARCH64_OPND_SVE_SIMM8:
5715 case AARCH64_OPND_SVE_UIMM3:
5716 case AARCH64_OPND_SVE_UIMM7:
5717 case AARCH64_OPND_SVE_UIMM8:
5718 case AARCH64_OPND_SVE_UIMM8_53:
5719 case AARCH64_OPND_IMM_ROT1:
5720 case AARCH64_OPND_IMM_ROT2:
5721 case AARCH64_OPND_IMM_ROT3:
5722 case AARCH64_OPND_SVE_IMM_ROT1:
5723 case AARCH64_OPND_SVE_IMM_ROT2:
5724 po_imm_nc_or_fail ();
5725 info->imm.value = val;
5728 case AARCH64_OPND_SVE_AIMM:
5729 case AARCH64_OPND_SVE_ASIMM:
5730 po_imm_nc_or_fail ();
5731 info->imm.value = val;
5732 skip_whitespace (str);
5733 if (skip_past_comma (&str))
5734 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5736 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5739 case AARCH64_OPND_SVE_PATTERN:
5740 po_enum_or_fail (aarch64_sve_pattern_array);
5741 info->imm.value = val;
5744 case AARCH64_OPND_SVE_PATTERN_SCALED:
5745 po_enum_or_fail (aarch64_sve_pattern_array);
5746 info->imm.value = val;
5747 if (skip_past_comma (&str)
5748 && !parse_shift (&str, info, SHIFTED_MUL))
5750 if (!info->shifter.operator_present)
5752 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5753 info->shifter.kind = AARCH64_MOD_MUL;
5754 info->shifter.amount = 1;
5758 case AARCH64_OPND_SVE_PRFOP:
5759 po_enum_or_fail (aarch64_sve_prfop_array);
5760 info->imm.value = val;
5763 case AARCH64_OPND_UIMM7:
5764 po_imm_or_fail (0, 127);
5765 info->imm.value = val;
5768 case AARCH64_OPND_IDX:
5769 case AARCH64_OPND_MASK:
5770 case AARCH64_OPND_BIT_NUM:
5771 case AARCH64_OPND_IMMR:
5772 case AARCH64_OPND_IMMS:
5773 po_imm_or_fail (0, 63);
5774 info->imm.value = val;
5777 case AARCH64_OPND_IMM0:
5778 po_imm_nc_or_fail ();
5781 set_fatal_syntax_error (_("immediate zero expected"));
5784 info->imm.value = 0;
5787 case AARCH64_OPND_FPIMM0:
5790 bfd_boolean res1 = FALSE, res2 = FALSE;
5791 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5792 it is probably not worth the effort to support it. */
5793 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5796 || !(res2 = parse_constant_immediate (&str, &val,
5799 if ((res1 && qfloat == 0) || (res2 && val == 0))
5801 info->imm.value = 0;
5802 info->imm.is_fp = 1;
5805 set_fatal_syntax_error (_("immediate zero expected"));
5809 case AARCH64_OPND_IMM_MOV:
5812 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5813 reg_name_p (str, REG_TYPE_VN))
5816 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5818 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5819 later. fix_mov_imm_insn will try to determine a machine
5820 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5821 message if the immediate cannot be moved by a single
5823 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5824 inst.base.operands[i].skip = 1;
5828 case AARCH64_OPND_SIMD_IMM:
5829 case AARCH64_OPND_SIMD_IMM_SFT:
5830 if (! parse_big_immediate (&str, &val, imm_reg_type))
5832 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5834 /* need_libopcodes_p */ 1,
5837 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5838 shift, we don't check it here; we leave the checking to
5839 the libopcodes (operand_general_constraint_met_p). By
5840 doing this, we achieve better diagnostics. */
5841 if (skip_past_comma (&str)
5842 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5844 if (!info->shifter.operator_present
5845 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5847 /* Default to LSL if not present. Libopcodes prefers shifter
5848 kind to be explicit. */
5849 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5850 info->shifter.kind = AARCH64_MOD_LSL;
5854 case AARCH64_OPND_FPIMM:
5855 case AARCH64_OPND_SIMD_FPIMM:
5856 case AARCH64_OPND_SVE_FPIMM8:
5861 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5862 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5863 || !aarch64_imm_float_p (qfloat))
5866 set_fatal_syntax_error (_("invalid floating-point"
5870 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5871 inst.base.operands[i].imm.is_fp = 1;
5875 case AARCH64_OPND_SVE_I1_HALF_ONE:
5876 case AARCH64_OPND_SVE_I1_HALF_TWO:
5877 case AARCH64_OPND_SVE_I1_ZERO_ONE:
5882 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5883 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
5886 set_fatal_syntax_error (_("invalid floating-point"
5890 inst.base.operands[i].imm.value = qfloat;
5891 inst.base.operands[i].imm.is_fp = 1;
5895 case AARCH64_OPND_LIMM:
5896 po_misc_or_fail (parse_shifter_operand (&str, info,
5897 SHIFTED_LOGIC_IMM));
5898 if (info->shifter.operator_present)
5900 set_fatal_syntax_error
5901 (_("shift not allowed for bitmask immediate"));
5904 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5906 /* need_libopcodes_p */ 1,
5910 case AARCH64_OPND_AIMM:
5911 if (opcode->op == OP_ADD)
5912 /* ADD may have relocation types. */
5913 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5914 SHIFTED_ARITH_IMM));
5916 po_misc_or_fail (parse_shifter_operand (&str, info,
5917 SHIFTED_ARITH_IMM));
5918 switch (inst.reloc.type)
5920 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5921 info->shifter.amount = 12;
5923 case BFD_RELOC_UNUSED:
5924 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5925 if (info->shifter.kind != AARCH64_MOD_NONE)
5926 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5927 inst.reloc.pc_rel = 0;
5932 info->imm.value = 0;
5933 if (!info->shifter.operator_present)
5935 /* Default to LSL if not present. Libopcodes prefers shifter
5936 kind to be explicit. */
5937 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5938 info->shifter.kind = AARCH64_MOD_LSL;
5942 case AARCH64_OPND_HALF:
5944 /* #<imm16> or relocation. */
5945 int internal_fixup_p;
5946 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5947 if (internal_fixup_p)
5948 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5949 skip_whitespace (str);
5950 if (skip_past_comma (&str))
5952 /* {, LSL #<shift>} */
5953 if (! aarch64_gas_internal_fixup_p ())
5955 set_fatal_syntax_error (_("can't mix relocation modifier "
5956 "with explicit shift"));
5959 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5962 inst.base.operands[i].shifter.amount = 0;
5963 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5964 inst.base.operands[i].imm.value = 0;
5965 if (! process_movw_reloc_info ())
5970 case AARCH64_OPND_EXCEPTION:
5971 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
5973 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5975 /* need_libopcodes_p */ 0,
5979 case AARCH64_OPND_NZCV:
5981 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5985 info->imm.value = nzcv->value;
5988 po_imm_or_fail (0, 15);
5989 info->imm.value = val;
5993 case AARCH64_OPND_COND:
5994 case AARCH64_OPND_COND1:
5999 while (ISALPHA (*str));
6000 info->cond = hash_find_n (aarch64_cond_hsh, start, str - start);
6001 if (info->cond == NULL)
6003 set_syntax_error (_("invalid condition"));
6006 else if (operands[i] == AARCH64_OPND_COND1
6007 && (info->cond->value & 0xe) == 0xe)
6009 /* Do not allow AL or NV. */
6010 set_default_error ();
6016 case AARCH64_OPND_ADDR_ADRP:
6017 po_misc_or_fail (parse_adrp (&str));
6018 /* Clear the value as operand needs to be relocated. */
6019 info->imm.value = 0;
6022 case AARCH64_OPND_ADDR_PCREL14:
6023 case AARCH64_OPND_ADDR_PCREL19:
6024 case AARCH64_OPND_ADDR_PCREL21:
6025 case AARCH64_OPND_ADDR_PCREL26:
6026 po_misc_or_fail (parse_address (&str, info));
6027 if (!info->addr.pcrel)
6029 set_syntax_error (_("invalid pc-relative address"));
6032 if (inst.gen_lit_pool
6033 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6035 /* Only permit "=value" in the literal load instructions.
6036 The literal will be generated by programmer_friendly_fixup. */
6037 set_syntax_error (_("invalid use of \"=immediate\""));
6040 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6042 set_syntax_error (_("unrecognized relocation suffix"));
6045 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6047 info->imm.value = inst.reloc.exp.X_add_number;
6048 inst.reloc.type = BFD_RELOC_UNUSED;
6052 info->imm.value = 0;
6053 if (inst.reloc.type == BFD_RELOC_UNUSED)
6054 switch (opcode->iclass)
6058 /* e.g. CBZ or B.COND */
6059 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6060 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6064 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6065 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6069 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6071 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6072 : BFD_RELOC_AARCH64_JUMP26;
6075 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6076 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6079 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6080 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6086 inst.reloc.pc_rel = 1;
6090 case AARCH64_OPND_ADDR_SIMPLE:
6091 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6093 /* [<Xn|SP>{, #<simm>}] */
6095 /* First use the normal address-parsing routines, to get
6096 the usual syntax errors. */
6097 po_misc_or_fail (parse_address (&str, info));
6098 if (info->addr.pcrel || info->addr.offset.is_reg
6099 || !info->addr.preind || info->addr.postind
6100 || info->addr.writeback)
6102 set_syntax_error (_("invalid addressing mode"));
6106 /* Then retry, matching the specific syntax of these addresses. */
6108 po_char_or_fail ('[');
6109 po_reg_or_fail (REG_TYPE_R64_SP);
6110 /* Accept optional ", #0". */
6111 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6112 && skip_past_char (&str, ','))
6114 skip_past_char (&str, '#');
6115 if (! skip_past_char (&str, '0'))
6117 set_fatal_syntax_error
6118 (_("the optional immediate offset can only be 0"));
6122 po_char_or_fail (']');
6126 case AARCH64_OPND_ADDR_REGOFF:
6127 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6128 po_misc_or_fail (parse_address (&str, info));
6130 if (info->addr.pcrel || !info->addr.offset.is_reg
6131 || !info->addr.preind || info->addr.postind
6132 || info->addr.writeback)
6134 set_syntax_error (_("invalid addressing mode"));
6137 if (!info->shifter.operator_present)
6139 /* Default to LSL if not present. Libopcodes prefers shifter
6140 kind to be explicit. */
6141 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6142 info->shifter.kind = AARCH64_MOD_LSL;
6144 /* Qualifier to be deduced by libopcodes. */
6147 case AARCH64_OPND_ADDR_SIMM7:
6148 po_misc_or_fail (parse_address (&str, info));
6149 if (info->addr.pcrel || info->addr.offset.is_reg
6150 || (!info->addr.preind && !info->addr.postind))
6152 set_syntax_error (_("invalid addressing mode"));
6155 if (inst.reloc.type != BFD_RELOC_UNUSED)
6157 set_syntax_error (_("relocation not allowed"));
6160 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6162 /* need_libopcodes_p */ 1,
6166 case AARCH64_OPND_ADDR_SIMM9:
6167 case AARCH64_OPND_ADDR_SIMM9_2:
6168 po_misc_or_fail (parse_address (&str, info));
6169 if (info->addr.pcrel || info->addr.offset.is_reg
6170 || (!info->addr.preind && !info->addr.postind)
6171 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6172 && info->addr.writeback))
6174 set_syntax_error (_("invalid addressing mode"));
6177 if (inst.reloc.type != BFD_RELOC_UNUSED)
6179 set_syntax_error (_("relocation not allowed"));
6182 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6184 /* need_libopcodes_p */ 1,
6188 case AARCH64_OPND_ADDR_SIMM10:
6189 case AARCH64_OPND_ADDR_OFFSET:
6190 po_misc_or_fail (parse_address (&str, info));
6191 if (info->addr.pcrel || info->addr.offset.is_reg
6192 || !info->addr.preind || info->addr.postind)
6194 set_syntax_error (_("invalid addressing mode"));
6197 if (inst.reloc.type != BFD_RELOC_UNUSED)
6199 set_syntax_error (_("relocation not allowed"));
6202 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6204 /* need_libopcodes_p */ 1,
6208 case AARCH64_OPND_ADDR_UIMM12:
6209 po_misc_or_fail (parse_address (&str, info));
6210 if (info->addr.pcrel || info->addr.offset.is_reg
6211 || !info->addr.preind || info->addr.writeback)
6213 set_syntax_error (_("invalid addressing mode"));
6216 if (inst.reloc.type == BFD_RELOC_UNUSED)
6217 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6218 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6220 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6222 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6224 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6226 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6227 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6228 /* Leave qualifier to be determined by libopcodes. */
6231 case AARCH64_OPND_SIMD_ADDR_POST:
6232 /* [<Xn|SP>], <Xm|#<amount>> */
6233 po_misc_or_fail (parse_address (&str, info));
6234 if (!info->addr.postind || !info->addr.writeback)
6236 set_syntax_error (_("invalid addressing mode"));
6239 if (!info->addr.offset.is_reg)
6241 if (inst.reloc.exp.X_op == O_constant)
6242 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6245 set_fatal_syntax_error
6246 (_("writeback value must be an immediate constant"));
6253 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6254 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6255 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6256 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6257 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6258 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6259 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6260 case AARCH64_OPND_SVE_ADDR_RI_U6:
6261 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6262 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6263 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6264 /* [X<n>{, #imm, MUL VL}]
6266 but recognizing SVE registers. */
6267 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6268 &offset_qualifier));
6269 if (base_qualifier != AARCH64_OPND_QLF_X)
6271 set_syntax_error (_("invalid addressing mode"));
6275 if (info->addr.pcrel || info->addr.offset.is_reg
6276 || !info->addr.preind || info->addr.writeback)
6278 set_syntax_error (_("invalid addressing mode"));
6281 if (inst.reloc.type != BFD_RELOC_UNUSED
6282 || inst.reloc.exp.X_op != O_constant)
6284 /* Make sure this has priority over
6285 "invalid addressing mode". */
6286 set_fatal_syntax_error (_("constant offset required"));
6289 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6292 case AARCH64_OPND_SVE_ADDR_R:
6293 /* [<Xn|SP>{, <R><m>}]
6294 but recognizing SVE registers. */
6295 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6296 &offset_qualifier));
6297 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6299 offset_qualifier = AARCH64_OPND_QLF_X;
6300 info->addr.offset.is_reg = 1;
6301 info->addr.offset.regno = 31;
6303 else if (base_qualifier != AARCH64_OPND_QLF_X
6304 || offset_qualifier != AARCH64_OPND_QLF_X)
6306 set_syntax_error (_("invalid addressing mode"));
6311 case AARCH64_OPND_SVE_ADDR_RR:
6312 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6313 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6314 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6315 case AARCH64_OPND_SVE_ADDR_RX:
6316 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6317 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6318 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6319 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6320 but recognizing SVE registers. */
6321 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6322 &offset_qualifier));
6323 if (base_qualifier != AARCH64_OPND_QLF_X
6324 || offset_qualifier != AARCH64_OPND_QLF_X)
6326 set_syntax_error (_("invalid addressing mode"));
6331 case AARCH64_OPND_SVE_ADDR_RZ:
6332 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6333 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6334 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6335 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6336 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6337 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6338 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6339 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6340 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6341 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6342 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6343 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6344 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6345 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6346 &offset_qualifier));
6347 if (base_qualifier != AARCH64_OPND_QLF_X
6348 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6349 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6351 set_syntax_error (_("invalid addressing mode"));
6354 info->qualifier = offset_qualifier;
6357 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6358 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6359 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6360 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6361 /* [Z<n>.<T>{, #imm}] */
6362 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6363 &offset_qualifier));
6364 if (base_qualifier != AARCH64_OPND_QLF_S_S
6365 && base_qualifier != AARCH64_OPND_QLF_S_D)
6367 set_syntax_error (_("invalid addressing mode"));
6370 info->qualifier = base_qualifier;
6373 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6374 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6375 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6376 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6377 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6381 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6383 here since we get better error messages by leaving it to
6384 the qualifier checking routines. */
6385 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6386 &offset_qualifier));
6387 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6388 && base_qualifier != AARCH64_OPND_QLF_S_D)
6389 || offset_qualifier != base_qualifier)
6391 set_syntax_error (_("invalid addressing mode"));
6394 info->qualifier = base_qualifier;
6397 case AARCH64_OPND_SYSREG:
6399 uint32_t sysreg_flags;
6400 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6401 &sysreg_flags)) == PARSE_FAIL)
6403 set_syntax_error (_("unknown or missing system register name"));
6406 inst.base.operands[i].sysreg.value = val;
6407 inst.base.operands[i].sysreg.flags = sysreg_flags;
6411 case AARCH64_OPND_PSTATEFIELD:
6412 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6415 set_syntax_error (_("unknown or missing PSTATE field name"));
6418 inst.base.operands[i].pstatefield = val;
6421 case AARCH64_OPND_SYSREG_IC:
6422 inst.base.operands[i].sysins_op =
6423 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6425 case AARCH64_OPND_SYSREG_DC:
6426 inst.base.operands[i].sysins_op =
6427 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6429 case AARCH64_OPND_SYSREG_AT:
6430 inst.base.operands[i].sysins_op =
6431 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6433 case AARCH64_OPND_SYSREG_TLBI:
6434 inst.base.operands[i].sysins_op =
6435 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6437 if (inst.base.operands[i].sysins_op == NULL)
6439 set_fatal_syntax_error ( _("unknown or missing operation name"));
6444 case AARCH64_OPND_BARRIER:
6445 case AARCH64_OPND_BARRIER_ISB:
6446 val = parse_barrier (&str);
6447 if (val != PARSE_FAIL
6448 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6450 /* ISB only accepts options name 'sy'. */
6452 (_("the specified option is not accepted in ISB"));
6453 /* Turn off backtrack as this optional operand is present. */
6457 /* This is an extension to accept a 0..15 immediate. */
6458 if (val == PARSE_FAIL)
6459 po_imm_or_fail (0, 15);
6460 info->barrier = aarch64_barrier_options + val;
6463 case AARCH64_OPND_PRFOP:
6464 val = parse_pldop (&str);
6465 /* This is an extension to accept a 0..31 immediate. */
6466 if (val == PARSE_FAIL)
6467 po_imm_or_fail (0, 31);
6468 inst.base.operands[i].prfop = aarch64_prfops + val;
6471 case AARCH64_OPND_BARRIER_PSB:
6472 val = parse_barrier_psb (&str, &(info->hint_option));
6473 if (val == PARSE_FAIL)
6478 as_fatal (_("unhandled operand code %d"), operands[i]);
6481 /* If we get here, this operand was successfully parsed. */
6482 inst.base.operands[i].present = 1;
6486 /* The parse routine should already have set the error, but in case
6487 not, set a default one here. */
6489 set_default_error ();
6491 if (! backtrack_pos)
6492 goto parse_operands_return;
6495 /* We reach here because this operand is marked as optional, and
6496 either no operand was supplied or the operand was supplied but it
6497 was syntactically incorrect. In the latter case we report an
6498 error. In the former case we perform a few more checks before
6499 dropping through to the code to insert the default operand. */
6501 char *tmp = backtrack_pos;
6502 char endchar = END_OF_INSN;
6504 if (i != (aarch64_num_of_operands (opcode) - 1))
6506 skip_past_char (&tmp, ',');
6508 if (*tmp != endchar)
6509 /* The user has supplied an operand in the wrong format. */
6510 goto parse_operands_return;
6512 /* Make sure there is not a comma before the optional operand.
6513 For example the fifth operand of 'sys' is optional:
6515 sys #0,c0,c0,#0, <--- wrong
6516 sys #0,c0,c0,#0 <--- correct. */
6517 if (comma_skipped_p && i && endchar == END_OF_INSN)
6519 set_fatal_syntax_error
6520 (_("unexpected comma before the omitted optional operand"));
6521 goto parse_operands_return;
6525 /* Reaching here means we are dealing with an optional operand that is
6526 omitted from the assembly line. */
6527 gas_assert (optional_operand_p (opcode, i));
6529 process_omitted_operand (operands[i], opcode, i, info);
6531 /* Try again, skipping the optional operand at backtrack_pos. */
6532 str = backtrack_pos;
6535 /* Clear any error record after the omitted optional operand has been
6536 successfully handled. */
6540 /* Check if we have parsed all the operands. */
6541 if (*str != '\0' && ! error_p ())
6543 /* Set I to the index of the last present operand; this is
6544 for the purpose of diagnostics. */
6545 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6547 set_fatal_syntax_error
6548 (_("unexpected characters following instruction"));
6551 parse_operands_return:
6555 DEBUG_TRACE ("parsing FAIL: %s - %s",
6556 operand_mismatch_kind_names[get_error_kind ()],
6557 get_error_message ());
6558 /* Record the operand error properly; this is useful when there
6559 are multiple instruction templates for a mnemonic name, so that
6560 later on, we can select the error that most closely describes
6562 record_operand_error (opcode, i, get_error_kind (),
6563 get_error_message ());
6568 DEBUG_TRACE ("parsing SUCCESS");
6573 /* It does some fix-up to provide some programmer friendly feature while
6574 keeping the libopcodes happy, i.e. libopcodes only accepts
6575 the preferred architectural syntax.
6576 Return FALSE if there is any failure; otherwise return TRUE. */
6579 programmer_friendly_fixup (aarch64_instruction *instr)
/* Convenience aliases: BASE is the libopcodes-level view of INSTR,
   OPCODE/OP identify the matched template, and OPERANDS is the
   already-parsed operand array that the cases below may rewrite.  */
6581 aarch64_inst *base = &instr->base;
6582 const aarch64_opcode *opcode = base->opcode;
6583 enum aarch64_op op = opcode->op;
6584 aarch64_opnd_info *operands = base->operands;
6586 DEBUG_TRACE ("enter");
/* Dispatch on the instruction class; each case accepts a convenience
   spelling and rewrites it into the single architectural form that
   libopcodes is willing to encode.  */
6588 switch (opcode->iclass)
6591 /* TBNZ Xn|Wn, #uimm6, label
6592 Test and Branch Not Zero: conditionally jumps to label if bit number
6593 uimm6 in register Xn is not zero. The bit number implies the width of
6594 the register, which may be written and should be disassembled as Wn if
6595 uimm is less than 32. */
6596 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
/* A 32-bit (W) register only has bits 0-31; larger bit numbers are
   diagnosed as out of range.  */
6598 if (operands[1].imm.value >= 32)
6600 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
/* NOTE(review): the branch structure around this qualifier rewrite is
   elided in this extract — confirm against the full source which path
   performs the W->X widening.  */
6604 operands[0].qualifier = AARCH64_OPND_QLF_X;
6608 /* LDR Wt, label | =value
6609 As a convenience assemblers will typically permit the notation
6610 "=value" in conjunction with the pc-relative literal load instructions
6611 to automatically place an immediate value or symbolic address in a
6612 nearby literal pool and generate a hidden label which references it.
6613 ISREG has been set to 0 in the case of =value. */
6614 if (instr->gen_lit_pool
6615 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
/* SIZE is the byte size of the literal to emit, derived from the
   destination register's qualifier (adjusted for LDRSW below).  */
6617 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
6618 if (op == OP_LDRSW_LIT)
/* Only constant-like expressions can be placed in a literal pool.  */
6620 if (instr->reloc.exp.X_op != O_constant
6621 && instr->reloc.exp.X_op != O_big
6622 && instr->reloc.exp.X_op != O_symbol)
6624 record_operand_error (opcode, 1,
6625 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
6626 _("constant expression expected"));
/* Stash the expression in the current literal pool; failure here is
   reported as an "other" (non-syntax) operand error.  */
6629 if (! add_to_lit_pool (&instr->reloc.exp, size))
6631 record_operand_error (opcode, 1,
6632 AARCH64_OPDE_OTHER_ERROR,
6633 _("literal pool insertion failed"));
6641 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
6642 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
6643 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
6644 A programmer-friendly assembler should accept a destination Xd in
6645 place of Wd, however that is not the preferred form for disassembly.
/* Accept "uxtb/uxth/uxtw Xd, Wn" and quietly narrow the destination
   qualifier to W, the preferred architectural form.  */
6647 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
6648 && operands[1].qualifier == AARCH64_OPND_QLF_W
6649 && operands[0].qualifier == AARCH64_OPND_QLF_X)
6650 operands[0].qualifier = AARCH64_OPND_QLF_W;
6655 /* In the 64-bit form, the final register operand is written as Wm
6656 for all but the (possibly omitted) UXTX/LSL and SXTX
6658 As a programmer-friendly assembler, we accept e.g.
6659 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
6660 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
/* Locate the extended-register operand (Rm_EXT); it is operand 1 or 2
   depending on the template.  */
6661 int idx = aarch64_operand_index (opcode->operands,
6662 AARCH64_OPND_Rm_EXT);
6663 gas_assert (idx == 1 || idx == 2);
/* Narrow Xm to Wm unless the extend is LSL/UXTX/SXTX, which keep the
   64-bit source register in the architectural syntax.  */
6664 if (operands[0].qualifier == AARCH64_OPND_QLF_X
6665 && operands[idx].qualifier == AARCH64_OPND_QLF_X
6666 && operands[idx].shifter.kind != AARCH64_MOD_LSL
6667 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
6668 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
6669 operands[idx].qualifier = AARCH64_OPND_QLF_W;
6677 DEBUG_TRACE ("exit with SUCCESS");
6681 /* Check for loads and stores that will cause unpredictable behavior. */
/* INSTR is the fully parsed instruction; STR is the original source line,
   used verbatim in the warning messages.  Only warnings are issued —
   the instruction is still assembled.  */
6684 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
6686 aarch64_inst *base = &instr->base;
6687 const aarch64_opcode *opcode = base->opcode;
6688 const aarch64_opnd_info *opnds = base->operands;
6689 switch (opcode->iclass)
6696 /* Loading/storing the base register is unpredictable if writeback. */
/* Single-register form: transfer register is operand 0, address is
   operand 1.  SP as base is exempt from the warning.  */
6697 if ((aarch64_get_operand_class (opnds[0].type)
6698 == AARCH64_OPND_CLASS_INT_REG)
6699 && opnds[0].reg.regno == opnds[1].addr.base_regno
6700 && opnds[1].addr.base_regno != REG_SP
6701 && opnds[1].addr.writeback)
6702 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6705 case ldstnapair_offs:
6706 case ldstpair_indexed:
6707 /* Loading/storing the base register is unpredictable if writeback. */
/* Pair form: transfer registers are operands 0 and 1, address is
   operand 2.  Either transfer register aliasing a (non-SP) written-back
   base triggers the warning.  */
6708 if ((aarch64_get_operand_class (opnds[0].type)
6709 == AARCH64_OPND_CLASS_INT_REG)
6710 && (opnds[0].reg.regno == opnds[2].addr.base_regno
6711 || opnds[1].reg.regno == opnds[2].addr.base_regno)
6712 && opnds[2].addr.base_regno != REG_SP
6713 && opnds[2].addr.writeback)
6714 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6715 /* Load operations must load different registers. */
/* Bit 22 set in the encoding marks a load (vs. store) pair here.  */
6716 if ((opcode->opcode & (1 << 22))
6717 && opnds[0].reg.regno == opnds[1].reg.regno)
6718 as_warn (_("unpredictable load of register pair -- `%s'"), str);
6722 /* It is unpredictable if the destination and status registers are the
/* Exclusive-store style form: compare the status register (operand 0)
   against both transfer registers (operands 1 and 2).  */
6724 if ((aarch64_get_operand_class (opnds[0].type)
6725 == AARCH64_OPND_CLASS_INT_REG)
6726 && (aarch64_get_operand_class (opnds[1].type)
6727 == AARCH64_OPND_CLASS_INT_REG)
6728 && (opnds[0].reg.regno == opnds[1].reg.regno
6729 || opnds[0].reg.regno == opnds[2].reg.regno))
6730 as_warn (_("unpredictable: identical transfer and status registers"
/* If an instruction sequence opened by a previous instruction is still
   pending, warn that it was never closed and reset the sequence state.  */
6742 force_automatic_sequence_close (void)
6744 if (now_instr_sequence.instr)
6746 as_warn (_("previous `%s' sequence has not been closed"),
6747 now_instr_sequence.instr->opcode->name);
/* Passing NULL re-initializes NOW_INSTR_SEQUENCE to the empty state.  */
6748 init_insn_sequence (NULL, &now_instr_sequence);
6752 /* A wrapper function to interface with libopcodes on encoding and
6753 record the error message if there is any.
6755 Return TRUE on success; otherwise return FALSE. */
6758 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
/* Start from a clean error record so we can tell whether libopcodes
   actually reported anything.  */
6761 aarch64_operand_error error_info;
6762 memset (&error_info, '\0', sizeof (error_info));
6763 error_info.kind = AARCH64_OPDE_NIL;
/* INSN_SEQUENCE lets libopcodes validate constraints that span several
   instructions (e.g. MOVPRFX sequences).  A non-fatal error means the
   encoding succeeded but a diagnostic should still be recorded.  */
6764 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
6765 && !error_info.non_fatal)
/* Encoding failed (or produced a non-fatal diagnostic): libopcodes must
   have filled in an error; record it for later reporting.  */
6768 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6769 record_operand_error_info (opcode, &error_info);
/* Treat a non-fatal diagnostic as overall success.  */
6770 return error_info.non_fatal;
6773 #ifdef DEBUG_AARCH64
/* Debug helper: print every operand of OPCODE, preferring the operand's
   short name and falling back to its long description when the name is
   empty.  Compiled only in DEBUG_AARCH64 builds.  */
6775 dump_opcode_operands (const aarch64_opcode *opcode)
6778 while (opcode->operands[i] != AARCH64_OPND_NIL)
6780 aarch64_verbose ("\t\t opnd%d: %s", i,
6781 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
6782 ? aarch64_get_operand_name (opcode->operands[i])
6783 : aarch64_get_operand_desc (opcode->operands[i]));
6787 #endif /* DEBUG_AARCH64 */
6789 /* This is the guts of the machine-dependent assembler. STR points to a
6790 machine dependent instruction. This function is supposed to emit
6791 the frags/bytes it assembles to. */
6794 md_assemble (char *str)
6797 templates *template;
6798 aarch64_opcode *opcode;
6799 aarch64_inst *inst_base;
6800 unsigned saved_cond;
6802 /* Align the previous label if needed. */
6803 if (last_label_seen != NULL)
6805 symbol_set_frag (last_label_seen, frag_now);
6806 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
6807 S_SET_SEGMENT (last_label_seen, now_seg);
6810 /* Update the current insn_sequence from the segment. */
6811 insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;
/* No relocation has been requested for this instruction yet.  */
6813 inst.reloc.type = BFD_RELOC_UNUSED;
6815 DEBUG_TRACE ("\n\n");
6816 DEBUG_TRACE ("==============================");
6817 DEBUG_TRACE ("Enter md_assemble with %s", str);
/* Look up the list of opcode entries sharing this mnemonic.  */
6819 template = opcode_lookup (&p);
6822 /* It wasn't an instruction, but it might be a register alias of
6823 the form alias .req reg directive. */
6824 if (!create_register_alias (str, p))
6825 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
6830 skip_whitespace (p);
6833 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
6834 get_mnemonic_name (str), str);
/* Discard operand diagnostics left over from the previous instruction.  */
6838 init_operand_error_report ();
6840 /* Sections are assumed to start aligned. In executable section, there is no
6841 MAP_DATA symbol pending. So we only align the address during
6842 MAP_DATA --> MAP_INSN transition.
6843 For other sections, this is not guaranteed. */
6844 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
6845 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
6846 frag_align_code (2, 0);
/* reset_aarch64_instruction clears the whole of INST, including the
   condition code already parsed from the mnemonic; preserve it across
   the reset.  */
6848 saved_cond = inst.cond;
6849 reset_aarch64_instruction (&inst);
6850 inst.cond = saved_cond;
6852 /* Iterate through all opcode entries with the same mnemonic name. */
6855 opcode = template->opcode;
6857 DEBUG_TRACE ("opcode %s found", opcode->name);
6858 #ifdef DEBUG_AARCH64
6860 dump_opcode_operands (opcode);
6861 #endif /* DEBUG_AARCH64 */
6863 mapping_state (MAP_INSN);
6865 inst_base = &inst.base;
6866 inst_base->opcode = opcode;
6868 /* Truly conditionally executed instructions, e.g. b.cond. */
6869 if (opcode->flags & F_COND)
6871 gas_assert (inst.cond != COND_ALWAYS);
6872 inst_base->cond = get_cond_from_value (inst.cond);
6873 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
6875 else if (inst.cond != COND_ALWAYS)
6877 /* It shouldn't arrive here, where the assembly looks like a
6878 conditional instruction but the found opcode is unconditional. */
/* Parse operands, apply programmer-friendly fixups, and encode; only
   a fully successful encode reaches the emission path below.  */
6883 if (parse_operands (p, opcode)
6884 && programmer_friendly_fixup (&inst)
6885 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
6887 /* Check that this instruction is supported for this CPU. */
6888 if (!opcode->avariant
6889 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
6891 as_bad (_("selected processor does not support `%s'"), str);
6895 warn_unpredictable_ldst (&inst, str);
6897 if (inst.reloc.type == BFD_RELOC_UNUSED
6898 || !inst.reloc.need_libopcodes_p)
6902 /* If there is relocation generated for the instruction,
6903 store the instruction information for the future fix-up. */
6904 struct aarch64_inst *copy;
6905 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
6906 copy = XNEW (struct aarch64_inst);
6907 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
6911 /* Issue non-fatal messages if any. */
6912 output_operand_error_report (str, TRUE);
/* This opcode entry did not match; retry with the next entry sharing
   the mnemonic, starting again from a freshly reset instruction.  */
6916 template = template->next;
6917 if (template != NULL)
6919 reset_aarch64_instruction (&inst);
6920 inst.cond = saved_cond;
6923 while (template != NULL);
6925 /* Issue the error messages if any. */
6926 output_operand_error_report (str, FALSE);
6929 /* Various frobbings of labels and their addresses. */
/* Called at the start of every input line: forget the label seen on the
   previous line so md_assemble only re-anchors a label defined on the
   current one.  */
6932 aarch64_start_line_hook (void)
6934 last_label_seen = NULL;
/* Remember SYM as the most recently defined label (so md_assemble can
   realign it to the next instruction) and emit DWARF line-table info
   for it.  */
6938 aarch64_frob_label (symbolS * sym)
6940 last_label_seen = sym;
6942 dwarf2_emit_label (sym);
/* Per-section hook: make sure any still-open automatic instruction
   sequence is closed before the section is finalized.  */
6946 aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
6948 /* Check to see if we have a block to close. */
6949 force_automatic_sequence_close ();
/* Detect a "/data:" marker in the input stream; if present, consume it
   and NUL-terminate the preceding text.  NOTE(review): this assumes
   input_line_pointer sits on the character just before "data:" --
   confirm against the generic TC_... data-in-code hook contract.  */
6953 aarch64_data_in_code (void)
6955 if (!strncmp (input_line_pointer + 1, "data:", 5))
6957 *input_line_pointer = '/';
6958 input_line_pointer += 5;
6959 *input_line_pointer = 0;
/* Strip a trailing "/data" suffix (added by the data-in-code handling
   above) from NAME, modifying the string in place.  */
6967 aarch64_canonicalize_symbol_name (char *name)
6971 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
6972 *(name + len - 5) = 0;
6977 /* Table of all register names defined by default. The user can
6978 define additional names with .req. Note that all register names
6979 should appear in both upper and lowercase variants. Some registers
6980 also have mixed-case names. */
/* REGDEF creates a primary register name; REGDEF_ALIAS creates an
   alternative name for an already-defined register (final field FALSE).  */
6982 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
6983 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
6984 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* REGSET16 expands to registers 0-15, REGSET31 to 0-30 (31 is special:
   SP or ZR depending on context), and REGSET to the full 0-31.  */
6985 #define REGSET16(p,t) \
6986 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
6987 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
6988 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
6989 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
6990 #define REGSET31(p,t) \
6992 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
6993 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
6994 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
6995 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
6996 #define REGSET(p,t) \
6997 REGSET31(p,t), REGNUM(p,31,t)
6999 /* These go into aarch64_reg_hsh hash-table. */
7000 static const reg_entry reg_names[] = {
7001 /* Integer registers. */
7002 REGSET31 (x, R_64), REGSET31 (X, R_64),
7003 REGSET31 (w, R_32), REGSET31 (W, R_32),
7005 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
7006 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
7007 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
7008 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
7009 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
7010 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
7012 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
7013 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
7015 /* Floating-point single precision registers. */
7016 REGSET (s, FP_S), REGSET (S, FP_S),
7018 /* Floating-point double precision registers. */
7019 REGSET (d, FP_D), REGSET (D, FP_D),
7021 /* Floating-point half precision registers. */
7022 REGSET (h, FP_H), REGSET (H, FP_H),
7024 /* Floating-point byte precision registers. */
7025 REGSET (b, FP_B), REGSET (B, FP_B),
7027 /* Floating-point quad precision registers. */
7028 REGSET (q, FP_Q), REGSET (Q, FP_Q),
7030 /* FP/SIMD registers. */
7031 REGSET (v, VN), REGSET (V, VN),
7033 /* SVE vector registers. */
7034 REGSET (z, ZN), REGSET (Z, ZN),
7036 /* SVE predicate registers.  Only p0-p15 exist.  */
7037 REGSET16 (p, PN), REGSET16 (P, PN)
/* Names for the 4-bit NZCV condition-flag immediate.  An upper-case
   letter in the name sets the corresponding flag bit; B packs the four
   bits as N:Z:C:V.  NOTE(review): n/z/c/v and N/Z/C/V are presumably
   0/1 constants defined just above (not visible here) -- confirm.  */
7055 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
7056 static const asm_nzcv nzcv_names[] = {
7057 {"nzcv", B (n, z, c, v)},
7058 {"nzcV", B (n, z, c, V)},
7059 {"nzCv", B (n, z, C, v)},
7060 {"nzCV", B (n, z, C, V)},
7061 {"nZcv", B (n, Z, c, v)},
7062 {"nZcV", B (n, Z, c, V)},
7063 {"nZCv", B (n, Z, C, v)},
7064 {"nZCV", B (n, Z, C, V)},
7065 {"Nzcv", B (N, z, c, v)},
7066 {"NzcV", B (N, z, c, V)},
7067 {"NzCv", B (N, z, C, v)},
7068 {"NzCV", B (N, z, C, V)},
7069 {"NZcv", B (N, Z, c, v)},
7070 {"NZcV", B (N, Z, c, V)},
7071 {"NZCv", B (N, Z, C, v)},
7072 {"NZCV", B (N, Z, C, V)}
7085 /* MD interface: bits in the object file. */
7087 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7088 for use in the a.out file, and stores them in the array pointed to by buf.
7089 This knows about the endian-ness of the target machine and does
7090 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7091 2 (short) and 4 (long) Floating numbers are put out as a series of
7092 LITTLENUMS (shorts, here at least). */
7095 md_number_to_chars (char *buf, valueT val, int n)
/* Byte order is chosen at run time from the target's endianness flag.  */
7097 if (target_big_endian)
7098 number_to_chars_bigendian (buf, val, n);
7100 number_to_chars_littleendian (buf, val, n);
7103 /* MD interface: Sections. */
7105 /* Estimate the size of a frag before relaxing. Assume everything fits in
/* NOTE(review): the body is not visible in this view; per the comment
   above, it assumes a fixed 4-byte instruction size -- confirm.  */
7109 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
7115 /* Round up a section size to the appropriate boundary. */
/* NOTE(review): body not visible here; for ELF, section alignment is
   recorded separately, so SIZE is expected to be returned unchanged --
   confirm.  */
7118 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
7123 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
7124 of an rs_align_code fragment.
7126 Here we fill the frag with the appropriate info for padding the
7127 output stream. The resulting frag will consist of a fixed (fr_fix)
7128 and of a repeating (fr_var) part.
7130 The fixed content is always emitted before the repeating content and
7131 these two parts are used as follows in constructing the output:
7132 - the fixed part will be used to align to a valid instruction word
7133 boundary, in case that we start at a misaligned address; as no
7134 executable instruction can live at the misaligned location, we
7135 simply fill with zeros;
7136 - the variable part will be used to cover the remaining padding and
7137 we fill using the AArch64 NOP instruction.
7139 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
7140 enough storage space for up to 3 bytes for padding the back to a valid
7141 instruction alignment and exactly 4 bytes to store the NOP pattern. */
7144 aarch64_handle_align (fragS * fragP)
7146 /* NOP = d503201f */
7147 /* AArch64 instructions are always little-endian. */
7148 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
7150 int bytes, fix, noop_size;
7153 if (fragP->fr_type != rs_align_code)
/* Total amount of padding this frag must produce.  */
7156 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
7157 p = fragP->fr_literal + fragP->fr_fix;
7160 gas_assert (fragP->tc_frag_data.recorded);
7163 noop_size = sizeof (aarch64_noop);
/* Bytes of zero-fill needed to reach 4-byte (NOP) alignment.  */
7165 fix = bytes & (noop_size - 1);
7169 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
7173 fragP->fr_fix += fix;
/* The repeating part of the frag is a single NOP instruction.  */
7177 memcpy (p, aarch64_noop, noop_size);
7178 fragP->fr_var = noop_size;
7181 /* Perform target specific initialisation of a frag.
7182 Note - despite the name this initialisation is not done when the frag
7183 is created, but only when its type is assigned. A frag can be created
7184 and used a long time before its type is set, so beware of assuming that
7185 this initialisation is performed first. */
/* Non-ELF targets: no mapping symbols, so nothing to do.  */
7189 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
7190 int max_chars ATTRIBUTE_UNUSED)
7194 #else /* OBJ_ELF is defined. */
7196 aarch64_init_frag (fragS * fragP, int max_chars)
7198 /* Record a mapping symbol for alignment frags. We will delete this
7199 later if the alignment ends up empty. */
7200 if (!fragP->tc_frag_data.recorded)
7201 fragP->tc_frag_data.recorded = 1;
7203 /* PR 21809: Do not set a mapping state for debug sections
7204 - it just confuses other tools. */
7205 if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
/* Choose the mapping state ($d data / $x insn) by frag type.
   NOTE(review): the case labels of this switch are elided in this
   view -- confirm which fr_type values select each branch.  */
7208 switch (fragP->fr_type)
7212 mapping_state_2 (MAP_DATA, max_chars);
7215 /* PR 20364: We can get alignment frags in code sections,
7216 so do not just assume that we should use the MAP_DATA state. */
7217 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
7220 mapping_state_2 (MAP_INSN, max_chars);
7227 /* Initialize the DWARF-2 unwind information for this procedure. */
/* The initial CFA rule is: CFA = SP + 0.  */
7230 tc_aarch64_frame_initial_instructions (void)
7232 cfi_add_CFA_def_cfa (REG_SP, 0);
7234 #endif /* OBJ_ELF */
7236 /* Convert REGNAME to a DWARF-2 register number. */
7239 tc_aarch64_regname_to_dw2regnum (char *regname)
7241 const reg_entry *reg = parse_reg (®name);
7247 case REG_TYPE_SP_32:
7248 case REG_TYPE_SP_64:
7258 return reg->number + 64;
7266 /* Implement DWARF2_ADDR_SIZE. */
/* Return the DWARF address size in bytes.  NOTE(review): the ILP32
   special case (return 4) is not visible in this view -- confirm.  */
7269 aarch64_dwarf2_addr_size (void)
7271 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
/* Derive the size from the output BFD's address width (8 for LP64).  */
7275 return bfd_arch_bits_per_address (stdoutput) / 8;
7278 /* MD interface: Symbol and relocation handling. */
7280 /* Return the address within the segment that a PC-relative fixup is
7281 relative to. For AArch64 PC-relative fixups applied to instructions
7282 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
7285 md_pcrel_from_section (fixS * fixP, segT seg)
/* Address of the fixup location itself within the output segment.  */
7287 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
7289 /* If this is pc-relative and we are going to emit a relocation
7290 then we just want to put out any pipeline compensation that the linker
7291 will need. Otherwise we want to use the calculated base. */
7293 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
7294 || aarch64_force_relocation (fixP)))
7297 /* AArch64 should be consistent for all pc-relative relocations. */
7298 return base + AARCH64_PCREL_OFFSET;
7301 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
7302 Otherwise we have no need to default values of symbols. */
7305 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
/* Cheap two-character prefix check before the full string compare.  */
7308 if (name[0] == '_' && name[1] == 'G'
7309 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
7313 if (symbol_find (name))
7314 as_bad (_("GOT already in the symbol table"));
/* Create the GOT symbol lazily as an undefined symbol; the linker
   assigns its final value.  */
7316 GOT_symbol = symbol_new (name, undefined_section,
7317 (valueT) 0, &zero_address_frag);
7327 /* Return non-zero if the indicated VALUE has overflowed the maximum
7328 range expressible by a unsigned number with the indicated number of
7332 unsigned_overflow (valueT value, unsigned bits)
/* A full-width value can never overflow, and shifting by the full type
   width below would be undefined behaviour.  */
7335 if (bits >= sizeof (valueT) * 8)
7337 lim = (valueT) 1 << bits;
7338 return (value >= lim);
7342 /* Return non-zero if the indicated VALUE has overflowed the maximum
7343 range expressible by an signed number with the indicated number of
7347 signed_overflow (offsetT value, unsigned bits)
7350 if (bits >= sizeof (offsetT) * 8)
/* LIM is 2^(bits-1); the representable signed range is [-LIM, LIM).  */
7352 lim = (offsetT) 1 << (bits - 1);
7353 return (value < -lim || value >= lim);
7356 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7357 unsigned immediate offset load/store instruction, try to encode it as
7358 an unscaled, 9-bit, signed immediate offset load/store instruction.
7359 Return TRUE if it is successful; otherwise return FALSE.
7361 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
7362 in response to the standard LDR/STR mnemonics when the immediate offset is
7363 unambiguous, i.e. when it is negative or unaligned. */
7366 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
7369 enum aarch64_op new_op;
7370 const aarch64_opcode *new_opcode;
7372 gas_assert (instr->opcode->iclass == ldst_pos);
/* Map each scaled (positive-immediate) opcode to its unscaled twin;
   OP_NIL means there is no unscaled equivalent.  */
7374 switch (instr->opcode->op)
7376 case OP_LDRB_POS:new_op = OP_LDURB; break;
7377 case OP_STRB_POS: new_op = OP_STURB; break;
7378 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
7379 case OP_LDRH_POS: new_op = OP_LDURH; break;
7380 case OP_STRH_POS: new_op = OP_STURH; break;
7381 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
7382 case OP_LDR_POS: new_op = OP_LDUR; break;
7383 case OP_STR_POS: new_op = OP_STUR; break;
7384 case OP_LDRF_POS: new_op = OP_LDURV; break;
7385 case OP_STRF_POS: new_op = OP_STURV; break;
7386 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
7387 case OP_PRFM_POS: new_op = OP_PRFUM; break;
7388 default: new_op = OP_NIL; break;
7391 if (new_op == OP_NIL)
7394 new_opcode = aarch64_get_opcode (new_op);
7395 gas_assert (new_opcode != NULL);
7397 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
7398 instr->opcode->op, new_opcode->op);
7400 aarch64_replace_opcode (instr, new_opcode);
7402 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
7403 qualifier matching may fail because the out-of-date qualifier will
7404 prevent the operand being updated with a new and correct qualifier. */
7405 idx = aarch64_operand_index (instr->opcode->operands,
7406 AARCH64_OPND_ADDR_SIMM9);
7407 gas_assert (idx == 1);
7408 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
7410 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
/* Re-encode with the replacement (unscaled) opcode.  */
7412 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
7419 /* Called by fix_insn to fix a MOV immediate alias instruction.
7421 Operand for a generic move immediate instruction, which is an alias
7422 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
7423 a 32-bit/64-bit immediate value into general register. An assembler error
7424 shall result if the immediate cannot be created by a single one of these
7425 instructions. If there is a choice, then to ensure reversability an
7426 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
7429 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
7431 const aarch64_opcode *opcode;
7433 /* Need to check if the destination is SP/ZR. The check has to be done
7434 before any aarch64_replace_opcode. */
7435 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
7436 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
/* Install the now-resolved immediate before attempting each alias.  */
7438 instr->operands[1].imm.value = value;
7439 instr->operands[1].skip = 0;
7443 /* Try the MOVZ alias. */
7444 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
7445 aarch64_replace_opcode (instr, opcode);
7446 if (aarch64_opcode_encode (instr->opcode, instr,
7447 &instr->value, NULL, NULL, insn_sequence))
7449 put_aarch64_insn (buf, instr->value);
7452 /* Try the MOVN alias (OP_MOV_IMM_WIDEN).  The previous comment said
   "MOVK", but per the contract above the fallback after MOVZ is MOVN;
   MOVK cannot load a full immediate on its own.  */
7453 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
7454 aarch64_replace_opcode (instr, opcode);
7455 if (aarch64_opcode_encode (instr->opcode, instr,
7456 &instr->value, NULL, NULL, insn_sequence))
7458 put_aarch64_insn (buf, instr->value);
7463 if (try_mov_bitmask_p)
7465 /* Try the ORR alias. */
7466 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
7467 aarch64_replace_opcode (instr, opcode);
7468 if (aarch64_opcode_encode (instr->opcode, instr,
7469 &instr->value, NULL, NULL, insn_sequence))
7471 put_aarch64_insn (buf, instr->value);
/* No single MOVZ/MOVN/ORR can produce this immediate.  */
7476 as_bad_where (fixP->fx_file, fixP->fx_line,
7477 _("immediate cannot be moved by a single instruction"));
7480 /* An instruction operand which is immediate related may have symbol used
7481 in the assembly, e.g.
7484 .set u32, 0x00ffff00
7486 At the time when the assembly instruction is parsed, a referenced symbol,
7487 like 'u32' in the above example may not have been seen; a fixS is created
7488 in such a case and is handled here after symbols have been resolved.
7489 Instruction is fixed up with VALUE using the information in *FIXP plus
7490 extra information in FLAGS.
7492 This function is called by md_apply_fix to fix up instructions that need
7493 a fix-up described above but does not involve any linker-time relocation. */
7496 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
/* BUF points at the instruction image inside the frag's literal pool.  */
7500 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7501 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
7502 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
7506 /* Now the instruction is about to be fixed-up, so the operand that
7507 was previously marked as 'ignored' needs to be unmarked in order
7508 to get the encoding done properly. */
7509 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
7510 new_inst->operands[idx].skip = 0;
7513 gas_assert (opnd != AARCH64_OPND_NIL);
/* SVC/HVC/SMC-style exception immediate: 16-bit unsigned.  */
7517 case AARCH64_OPND_EXCEPTION:
7518 if (unsigned_overflow (value, 16))
7519 as_bad_where (fixP->fx_file, fixP->fx_line,
7520 _("immediate out of range"));
7521 insn = get_aarch64_insn (buf);
7522 insn |= encode_svc_imm (value);
7523 put_aarch64_insn (buf, insn);
7526 case AARCH64_OPND_AIMM:
7527 /* ADD or SUB with immediate.
7528 NOTE this assumes we come here with a add/sub shifted reg encoding
7529 3 322|2222|2 2 2 21111 111111
7530 1 098|7654|3 2 1 09876 543210 98765 43210
7531 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
7532 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
7533 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
7534 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
7536 3 322|2222|2 2 221111111111
7537 1 098|7654|3 2 109876543210 98765 43210
7538 11000000 sf 001|0001|shift imm12 Rn Rd ADD
7539 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
7540 51000000 sf 101|0001|shift imm12 Rn Rd SUB
7541 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
7542 Fields sf Rn Rd are already set. */
7543 insn = get_aarch64_insn (buf);
/* NOTE(review): upstream flips ADD<->SUB here when the resolved value
   is negative; the guarding condition is not visible in this view.  */
7547 insn = reencode_addsub_switch_add_sub (insn);
7551 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
7552 && unsigned_overflow (value, 12))
7554 /* Try to shift the value by 12 to make it fit. */
7555 if (((value >> 12) << 12) == value
7556 && ! unsigned_overflow (value, 12 + 12))
7559 insn |= encode_addsub_imm_shift_amount (1);
7563 if (unsigned_overflow (value, 12))
7564 as_bad_where (fixP->fx_file, fixP->fx_line,
7565 _("immediate out of range"));
7567 insn |= encode_addsub_imm (value);
7569 put_aarch64_insn (buf, insn);
7572 case AARCH64_OPND_SIMD_IMM:
7573 case AARCH64_OPND_SIMD_IMM_SFT:
7574 case AARCH64_OPND_LIMM:
7575 /* Bit mask immediate. */
7576 gas_assert (new_inst != NULL);
7577 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
7578 new_inst->operands[idx].imm.value = value;
7579 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
7580 &new_inst->value, NULL, NULL, insn_sequence))
7581 put_aarch64_insn (buf, new_inst->value);
7583 as_bad_where (fixP->fx_file, fixP->fx_line,
7584 _("invalid immediate"));
7587 case AARCH64_OPND_HALF:
7588 /* 16-bit unsigned immediate. */
7589 if (unsigned_overflow (value, 16))
7590 as_bad_where (fixP->fx_file, fixP->fx_line,
7591 _("immediate out of range"));
7592 insn = get_aarch64_insn (buf);
7593 insn |= encode_movw_imm (value & 0xffff);
7594 put_aarch64_insn (buf, insn);
7597 case AARCH64_OPND_IMM_MOV:
7598 /* Operand for a generic move immediate instruction, which is
7599 an alias instruction that generates a single MOVZ, MOVN or ORR
7600 instruction to loads a 32-bit/64-bit immediate value into general
7601 register. An assembler error shall result if the immediate cannot be
7602 created by a single one of these instructions. If there is a choice,
7603 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
7604 and MOVZ or MOVN to ORR. */
7605 gas_assert (new_inst != NULL);
7606 fix_mov_imm_insn (fixP, buf, new_inst, value);
7609 case AARCH64_OPND_ADDR_SIMM7:
7610 case AARCH64_OPND_ADDR_SIMM9:
7611 case AARCH64_OPND_ADDR_SIMM9_2:
7612 case AARCH64_OPND_ADDR_SIMM10:
7613 case AARCH64_OPND_ADDR_UIMM12:
7614 /* Immediate offset in an address. */
7615 insn = get_aarch64_insn (buf);
7617 gas_assert (new_inst != NULL && new_inst->value == insn)
7618 gas_assert (new_inst->opcode->operands[1] == opnd
7619 || new_inst->opcode->operands[2] == opnd);
7621 /* Get the index of the address operand. */
7622 if (new_inst->opcode->operands[1] == opnd)
7623 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
7626 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
7629 /* Update the resolved offset value. */
7630 new_inst->operands[idx].addr.offset.imm = value;
7632 /* Encode/fix-up. */
7633 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
7634 &new_inst->value, NULL, NULL, insn_sequence))
7636 put_aarch64_insn (buf, new_inst->value);
/* The scaled encoding rejected the offset; fall back to the unscaled
   (LDUR/STUR) form for eligible load/stores.  */
7639 else if (new_inst->opcode->iclass == ldst_pos
7640 && try_to_encode_as_unscaled_ldst (new_inst))
7642 put_aarch64_insn (buf, new_inst->value);
7646 as_bad_where (fixP->fx_file, fixP->fx_line,
7647 _("immediate offset out of range"));
7652 as_fatal (_("unhandled operand code %d"), opnd);
7656 /* Apply a fixup (fixP) to segment data, once it has been determined
7657 by our caller that we have all the info we need to fix it up.
7659 Parameter valP is the pointer to the value of the bits. */
7662 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
7664 offsetT value = *valP;
7666 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7668 unsigned flags = fixP->fx_addnumber;
7670 DEBUG_TRACE ("\n\n");
7671 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
7672 DEBUG_TRACE ("Enter md_apply_fix");
7674 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
7676 /* Note whether this will delete the relocation. */
7678 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
7681 /* Process the relocations. */
7682 switch (fixP->fx_r_type)
7684 case BFD_RELOC_NONE:
7685 /* This will need to go in the object file. */
7690 case BFD_RELOC_8_PCREL:
7691 if (fixP->fx_done || !seg->use_rela_p)
7692 md_number_to_chars (buf, value, 1);
7696 case BFD_RELOC_16_PCREL:
7697 if (fixP->fx_done || !seg->use_rela_p)
7698 md_number_to_chars (buf, value, 2);
7702 case BFD_RELOC_32_PCREL:
7703 if (fixP->fx_done || !seg->use_rela_p)
7704 md_number_to_chars (buf, value, 4);
7708 case BFD_RELOC_64_PCREL:
7709 if (fixP->fx_done || !seg->use_rela_p)
7710 md_number_to_chars (buf, value, 8);
7713 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7714 /* We claim that these fixups have been processed here, even if
7715 in fact we generate an error because we do not have a reloc
7716 for them, so tc_gen_reloc() will reject them. */
7718 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
7720 as_bad_where (fixP->fx_file, fixP->fx_line,
7721 _("undefined symbol %s used as an immediate value"),
7722 S_GET_NAME (fixP->fx_addsy));
7723 goto apply_fix_return;
7725 fix_insn (fixP, flags, value);
7728 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7729 if (fixP->fx_done || !seg->use_rela_p)
7732 as_bad_where (fixP->fx_file, fixP->fx_line,
7733 _("pc-relative load offset not word aligned"));
7734 if (signed_overflow (value, 21))
7735 as_bad_where (fixP->fx_file, fixP->fx_line,
7736 _("pc-relative load offset out of range"));
7737 insn = get_aarch64_insn (buf);
7738 insn |= encode_ld_lit_ofs_19 (value >> 2);
7739 put_aarch64_insn (buf, insn);
7743 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7744 if (fixP->fx_done || !seg->use_rela_p)
7746 if (signed_overflow (value, 21))
7747 as_bad_where (fixP->fx_file, fixP->fx_line,
7748 _("pc-relative address offset out of range"));
7749 insn = get_aarch64_insn (buf);
7750 insn |= encode_adr_imm (value);
7751 put_aarch64_insn (buf, insn);
7755 case BFD_RELOC_AARCH64_BRANCH19:
7756 if (fixP->fx_done || !seg->use_rela_p)
7759 as_bad_where (fixP->fx_file, fixP->fx_line,
7760 _("conditional branch target not word aligned"));
7761 if (signed_overflow (value, 21))
7762 as_bad_where (fixP->fx_file, fixP->fx_line,
7763 _("conditional branch out of range"));
7764 insn = get_aarch64_insn (buf);
7765 insn |= encode_cond_branch_ofs_19 (value >> 2);
7766 put_aarch64_insn (buf, insn);
7770 case BFD_RELOC_AARCH64_TSTBR14:
7771 if (fixP->fx_done || !seg->use_rela_p)
7774 as_bad_where (fixP->fx_file, fixP->fx_line,
7775 _("conditional branch target not word aligned"));
7776 if (signed_overflow (value, 16))
7777 as_bad_where (fixP->fx_file, fixP->fx_line,
7778 _("conditional branch out of range"));
7779 insn = get_aarch64_insn (buf);
7780 insn |= encode_tst_branch_ofs_14 (value >> 2);
7781 put_aarch64_insn (buf, insn);
7785 case BFD_RELOC_AARCH64_CALL26:
7786 case BFD_RELOC_AARCH64_JUMP26:
7787 if (fixP->fx_done || !seg->use_rela_p)
7790 as_bad_where (fixP->fx_file, fixP->fx_line,
7791 _("branch target not word aligned"));
7792 if (signed_overflow (value, 28))
7793 as_bad_where (fixP->fx_file, fixP->fx_line,
7794 _("branch out of range"));
7795 insn = get_aarch64_insn (buf);
7796 insn |= encode_branch_ofs_26 (value >> 2);
7797 put_aarch64_insn (buf, insn);
7801 case BFD_RELOC_AARCH64_MOVW_G0:
7802 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7803 case BFD_RELOC_AARCH64_MOVW_G0_S:
7804 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7805 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
7806 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
7809 case BFD_RELOC_AARCH64_MOVW_G1:
7810 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7811 case BFD_RELOC_AARCH64_MOVW_G1_S:
7812 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7813 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
7814 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
7817 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7819 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7820 /* Should always be exported to object file, see
7821 aarch64_force_relocation(). */
7822 gas_assert (!fixP->fx_done);
7823 gas_assert (seg->use_rela_p);
7825 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7827 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7828 /* Should always be exported to object file, see
7829 aarch64_force_relocation(). */
7830 gas_assert (!fixP->fx_done);
7831 gas_assert (seg->use_rela_p);
7833 case BFD_RELOC_AARCH64_MOVW_G2:
7834 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7835 case BFD_RELOC_AARCH64_MOVW_G2_S:
7836 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
7837 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
7840 case BFD_RELOC_AARCH64_MOVW_G3:
7841 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
7844 if (fixP->fx_done || !seg->use_rela_p)
7846 insn = get_aarch64_insn (buf);
7850 /* REL signed addend must fit in 16 bits */
7851 if (signed_overflow (value, 16))
7852 as_bad_where (fixP->fx_file, fixP->fx_line,
7853 _("offset out of range"));
7857 /* Check for overflow and scale. */
7858 switch (fixP->fx_r_type)
7860 case BFD_RELOC_AARCH64_MOVW_G0:
7861 case BFD_RELOC_AARCH64_MOVW_G1:
7862 case BFD_RELOC_AARCH64_MOVW_G2:
7863 case BFD_RELOC_AARCH64_MOVW_G3:
7864 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7865 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7866 if (unsigned_overflow (value, scale + 16))
7867 as_bad_where (fixP->fx_file, fixP->fx_line,
7868 _("unsigned value out of range"));
7870 case BFD_RELOC_AARCH64_MOVW_G0_S:
7871 case BFD_RELOC_AARCH64_MOVW_G1_S:
7872 case BFD_RELOC_AARCH64_MOVW_G2_S:
7873 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
7874 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
7875 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
7876 /* NOTE: We can only come here with movz or movn. */
7877 if (signed_overflow (value, scale + 16))
7878 as_bad_where (fixP->fx_file, fixP->fx_line,
7879 _("signed value out of range"));
7882 /* Force use of MOVN. */
7884 insn = reencode_movzn_to_movn (insn);
7888 /* Force use of MOVZ. */
7889 insn = reencode_movzn_to_movz (insn);
7893 /* Unchecked relocations. */
7899 /* Insert value into MOVN/MOVZ/MOVK instruction. */
7900 insn |= encode_movw_imm (value & 0xffff);
7902 put_aarch64_insn (buf, insn);
7906 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7907 fixP->fx_r_type = (ilp32_p
7908 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
7909 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7910 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7911 /* Should always be exported to object file, see
7912 aarch64_force_relocation(). */
7913 gas_assert (!fixP->fx_done);
7914 gas_assert (seg->use_rela_p);
7917 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7918 fixP->fx_r_type = (ilp32_p
7919 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
7920 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
7921 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7922 /* Should always be exported to object file, see
7923 aarch64_force_relocation(). */
7924 gas_assert (!fixP->fx_done);
7925 gas_assert (seg->use_rela_p);
7928 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
7929 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7930 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7931 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7932 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
7933 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7934 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7935 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7936 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7937 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7938 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7939 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7940 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7941 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7942 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7943 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7944 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7945 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7946 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7947 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7948 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7949 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7950 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7951 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7952 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7953 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7954 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7955 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7956 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7957 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7958 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7959 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7960 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7961 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7962 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7963 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7964 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
7965 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
7966 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
7967 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
7968 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
7969 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
7970 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
7971 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
7972 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7973 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7974 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7975 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7976 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7977 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7978 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7979 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7980 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7981 /* Should always be exported to object file, see
7982 aarch64_force_relocation(). */
7983 gas_assert (!fixP->fx_done);
7984 gas_assert (seg->use_rela_p);
7987 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7988 /* Should always be exported to object file, see
7989 aarch64_force_relocation(). */
7990 fixP->fx_r_type = (ilp32_p
7991 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
7992 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
7993 gas_assert (!fixP->fx_done);
7994 gas_assert (seg->use_rela_p);
7997 case BFD_RELOC_AARCH64_ADD_LO12:
7998 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7999 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
8000 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
8001 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8002 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8003 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8004 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8005 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8006 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8007 case BFD_RELOC_AARCH64_LDST128_LO12:
8008 case BFD_RELOC_AARCH64_LDST16_LO12:
8009 case BFD_RELOC_AARCH64_LDST32_LO12:
8010 case BFD_RELOC_AARCH64_LDST64_LO12:
8011 case BFD_RELOC_AARCH64_LDST8_LO12:
8012 /* Should always be exported to object file, see
8013 aarch64_force_relocation(). */
8014 gas_assert (!fixP->fx_done);
8015 gas_assert (seg->use_rela_p);
8018 case BFD_RELOC_AARCH64_TLSDESC_ADD:
8019 case BFD_RELOC_AARCH64_TLSDESC_CALL:
8020 case BFD_RELOC_AARCH64_TLSDESC_LDR:
8023 case BFD_RELOC_UNUSED:
8024 /* An error will already have been reported. */
8028 as_bad_where (fixP->fx_file, fixP->fx_line,
8029 _("unexpected %s fixup"),
8030 bfd_get_reloc_code_name (fixP->fx_r_type));
8035 /* Free the allocated struct aarch64_inst.
8036 N.B. currently only a very limited number of fix-up types actually use
8037 this field, so the impact on performance should be minimal. */
8038 if (fixP->tc_fix_data.inst != NULL)
8039 free (fixP->tc_fix_data.inst);
/* Translate the internal fixup FIXP into a BFD `arelent' for SECTION.
   NOTE(review): several lines of this function are missing from this
   excerpt; the comments below describe only the visible code.  */
8044 /* Translate internal representation of relocation info to BFD target
8048 tc_gen_reloc (asection * section, fixS * fixp)
8051 bfd_reloc_code_real_type code;
/* Allocate the arelent and its symbol slot; the reloc applies at the
   frag address plus the offset within the frag.  */
8053 reloc = XNEW (arelent);
8055 reloc->sym_ptr_ptr = XNEW (asymbol *);
8056 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8057 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
/* For RELA-style sections the PC-relative bias is folded into the
   addend rather than the section contents.  */
8061 if (section->use_rela_p)
8062 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8064 fixp->fx_offset = reloc->address;
8066 reloc->addend = fixp->fx_offset;
8068 code = fixp->fx_r_type;
/* Plain data relocations on PC-relative expressions are mapped to the
   corresponding *_PCREL reloc codes.  */
8073 code = BFD_RELOC_16_PCREL;
8078 code = BFD_RELOC_32_PCREL;
8083 code = BFD_RELOC_64_PCREL;
/* Look up the howto entry; diagnose reloc codes that this object file
   format cannot represent.  */
8090 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8091 if (reloc->howto == NULL)
8093 as_bad_where (fixp->fx_file, fixp->fx_line,
8095 ("cannot represent %s relocation in this object file format"),
8096 bfd_get_reloc_code_name (code));
/* Create a fixup for a SIZE-byte data value emitted by a cons-style
   directive (.hword/.word/.xword).  Called by cons via TC_CONS_FIX_NEW.
   NOTE(review): the switch on SIZE is partly elided in this excerpt.  */
8103 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8106 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8108 bfd_reloc_code_real_type type;
8112 FIXME: @@ Should look at CPU word size. */
/* Map the byte count onto the matching generic BFD data reloc.  */
8119 type = BFD_RELOC_16;
8122 type = BFD_RELOC_32;
8125 type = BFD_RELOC_64;
/* Any other size is unsupported; emit an error and fall back to the
   BFD_RELOC_UNUSED sentinel.  */
8128 as_bad (_("cannot do %u-byte relocation"), size);
8129 type = BFD_RELOC_UNUSED;
8133 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
/* Decide whether the fixup FIXP must be kept as a relocation in the
   object file rather than resolved by the assembler.  GOT- and
   TLS-related relocs are always left for the linker; anything not
   matched falls through to generic_force_reloc.  */
8137 aarch64_force_relocation (struct fix *fixp)
8139 switch (fixp->fx_r_type)
8141 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
8142 /* Perform these "immediate" internal relocations
8143 even if the symbol is extern or weak. */
/* Pseudo relocs whose final form depends on the ABI (ILP32 vs LP64);
   they are rewritten later, see md_apply_fix.  */
8146 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
8147 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
8148 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
8149 /* Pseudo relocs that need to be fixed up according to
/* GOT-, TLS descriptor-, and TLS model-specific relocations: always
   exported to the object file for the linker to resolve.  */
8153 case BFD_RELOC_AARCH64_ADD_LO12:
8154 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8155 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
8156 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
8157 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8158 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8159 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8160 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8161 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8162 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8163 case BFD_RELOC_AARCH64_LDST128_LO12:
8164 case BFD_RELOC_AARCH64_LDST16_LO12:
8165 case BFD_RELOC_AARCH64_LDST32_LO12:
8166 case BFD_RELOC_AARCH64_LDST64_LO12:
8167 case BFD_RELOC_AARCH64_LDST8_LO12:
8168 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8169 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8170 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8171 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8172 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8173 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8174 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
8175 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8176 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8177 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8178 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8179 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8180 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8181 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8182 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8183 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8184 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8185 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8186 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8187 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
8188 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
8189 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
8190 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8191 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8192 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8193 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
8194 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
8195 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
8196 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
8197 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
8198 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
8199 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
8200 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
8201 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
8202 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
8203 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
8204 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
8205 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
8206 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
8207 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
8208 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
8209 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
8210 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
8211 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
8212 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
8213 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
8214 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
8215 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
8216 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
8217 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
8218 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
8219 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
8220 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
8221 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
8222 /* Always leave these relocations for the linker. */
/* Anything else: defer to the generic policy.  */
8229 return generic_force_reloc (fixp);
/* Implement md_after_parse_args: if -mabi did not set an ABI, derive
   ILP32 vs LP64 from the configured target triplet.  */
8234 /* Implement md_after_parse_args. This is the earliest time we need to decide
8235 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8238 aarch64_after_parse_args (void)
/* An explicit -mabi option wins; nothing to do.  */
8240 if (aarch64_abi != AARCH64_ABI_NONE)
/* DEFAULT_ARCH is "aarch64" (7 chars); a ":32" suffix after it selects
   the ILP32 data model.  */
8243 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8244 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8245 aarch64_abi = AARCH64_ABI_ILP32;
8247 aarch64_abi = AARCH64_ABI_LP64;
/* Return the BFD target name for the output file, selected by target
   OS (CloudABI), endianness, and the ILP32/LP64 data model.  */
8251 elf64_aarch64_target_format (void)
8253 if (strcmp (TARGET_OS, "cloudabi") == 0)
8255 /* FIXME: What to do for ilp32_p ? */
8256 return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi"
;
8258 if (target_big_endian)
8259 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8261 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
/* Symbol-frobbing hook: delegate to the generic ELF implementation.  */
8265 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
8267 elf_frob_symbol (symp, puntp);
8271 /* MD interface: Finalization. */
8273 /* A good place to do this, although this was probably not intended
8274 for this kind of use. We need to dump the literal pool before
8275 references are made to a null symbol pointer. */
/* Flush every pending literal pool into its owning (sub)section.
   NOTE(review): the pool-emitting call inside the loop is not visible
   in this excerpt.  */
8278 aarch64_cleanup (void)
8282 for (pool = list_of_pools; pool; pool = pool->next)
8284 /* Put it at the end of the relevant section. */
8285 subseg_set (pool->section, pool->sub_section);
8291 /* Remove any excess mapping symbols generated for alignment frags in
8292 SEC. We may have created a mapping symbol before a zero byte
8293 alignment; remove it if there's a mapping symbol after the
/* Walk every frag of SEC and drop a trailing mapping symbol when it is
   redundant: duplicated by the next frag's first mapping symbol, at the
   very end of the section, or shadowed across a run of empty frags.  */
8296 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
8297 void *dummy ATTRIBUTE_UNUSED)
8299 segment_info_type *seginfo = seg_info (sec);
/* Sections with no frag chain have nothing to clean up.  */
8302 if (seginfo == NULL || seginfo->frchainP == NULL)
8305 for (fragp = seginfo->frchainP->frch_root;
8306 fragp != NULL; fragp = fragp->fr_next)
8308 symbolS *sym = fragp->tc_frag_data.last_map;
8309 fragS *next = fragp->fr_next;
8311 /* Variable-sized frags have been converted to fixed size by
8312 this point. But if this was variable-sized to start with,
8313 there will be a fixed-size frag after it. So don't handle
8315 if (sym == NULL || next == NULL)
/* Only a mapping symbol sitting exactly at the frag boundary is a
   candidate for removal.  */
8318 if (S_GET_VALUE (sym) < next->fr_address)
8319 /* Not at the end of this frag. */
8321 know (S_GET_VALUE (sym) == next->fr_address);
8325 if (next->tc_frag_data.first_map != NULL)
8327 /* Next frag starts with a mapping symbol. Discard this
8329 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
8333 if (next->fr_next == NULL)
8335 /* This mapping symbol is at the end of the section. Discard
8337 know (next->fr_fix == 0 && next->fr_var == 0);
8338 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
8342 /* As long as we have empty frags without any mapping symbols,
8344 /* If the next frag is non-empty and does not start with a
8345 mapping symbol, then this mapping symbol is required. */
8346 if (next->fr_address != next->fr_next->fr_address)
8349 next = next->fr_next;
8351 while (next != NULL);
8356 /* Adjust the symbol table. */
/* Final symbol-table pass: prune redundant mapping symbols in every
   section, then apply the generic ELF adjustments.  */
8359 aarch64_adjust_symtab (void)
8362 /* Remove any overlapping mapping symbols generated by alignment frags. */
8363 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
8364 /* Now do generic ELF adjustments. */
8365 elf_adjust_symtab ();
/* Insert KEY -> VALUE into TABLE, reporting (but not aborting on) a
   failed insertion.  Duplicate keys are the visible failure mode.  */
8370 checked_hash_insert (struct hash_control *table, const char *key, void *value)
8372 const char *hash_err;
8374 hash_err = hash_insert (table, key, value);
/* hash_insert returns a non-NULL error string on failure.  */
8376 printf ("Internal Error: Can't hash %s\n", key);
/* Populate aarch64_ops_hsh from the libopcodes opcode table.  Opcodes
   sharing a mnemonic are chained onto the existing templates entry;
   the first occurrence creates the bucket.  */
8380 fill_instruction_hash_table (void)
8382 aarch64_opcode *opcode = aarch64_opcode_table;
8384 while (opcode->name != NULL)
8386 templates *templ, *new_templ;
8387 templ = hash_find (aarch64_ops_hsh, opcode->name);
8389 new_templ = XNEW (templates);
8390 new_templ->opcode = opcode;
8391 new_templ->next = NULL;
/* First opcode with this mnemonic: start a new chain.  */
8394 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
/* Otherwise splice the new template in right after the head.  */
8397 new_templ->next = templ->next;
8398 templ->next = new_templ;
/* Copy at most NUM characters of SRC into DST, upper-casing each one;
   stops early at the NUL terminator.  */
8405 convert_to_upper (char *dst, const char *src, size_t num)
8408 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
8409 *dst = TOUPPER (*src);
8413 /* Assume STR point to a lower-case string, allocate, convert and return
8414 the corresponding upper-case string. */
/* Ownership note: the returned buffer is heap-allocated (XNEWVEC) and
   is never freed by callers in this file — it lives for the lifetime
   of the assembler, serving as a hash-table key.  */
8415 static inline const char*
8416 get_upper_str (const char *str)
8419 size_t len = strlen (str);
8420 ret = XNEWVEC (char, len + 1);
8421 convert_to_upper (ret, str, len);
8425 /* MD interface: Initialization. */
/* md_begin (signature not visible in this excerpt): create all lookup
   hash tables, abort on allocation failure, then populate each table
   from the corresponding libopcodes array.  Where noted, names are
   hashed in both lower and upper case so operand parsing is
   case-insensitive.  */
8433 if ((aarch64_ops_hsh = hash_new ()) == NULL
8434 || (aarch64_cond_hsh = hash_new ()) == NULL
8435 || (aarch64_shift_hsh = hash_new ()) == NULL
8436 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
8437 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
8438 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
8439 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
8440 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
8441 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
8442 || (aarch64_reg_hsh = hash_new ()) == NULL
8443 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
8444 || (aarch64_nzcv_hsh = hash_new ()) == NULL
8445 || (aarch64_pldop_hsh = hash_new ()) == NULL
8446 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
8447 as_fatal (_("virtual memory exhausted"))
;
8449 fill_instruction_hash_table ();
/* System registers, PSTATE fields, and the IC/DC/AT/TLBI system
   instruction operands.  */
8451 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8452 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8453 (void *) (aarch64_sys_regs + i));
8455 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8456 checked_hash_insert (aarch64_pstatefield_hsh,
8457 aarch64_pstatefields[i].name,
8458 (void *) (aarch64_pstatefields + i));
8460 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8461 checked_hash_insert (aarch64_sys_regs_ic_hsh,
8462 aarch64_sys_regs_ic[i].name,
8463 (void *) (aarch64_sys_regs_ic + i));
8465 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8466 checked_hash_insert (aarch64_sys_regs_dc_hsh,
8467 aarch64_sys_regs_dc[i].name,
8468 (void *) (aarch64_sys_regs_dc + i));
8470 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8471 checked_hash_insert (aarch64_sys_regs_at_hsh,
8472 aarch64_sys_regs_at[i].name,
8473 (void *) (aarch64_sys_regs_at + i));
8475 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8476 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
8477 aarch64_sys_regs_tlbi[i].name,
8478 (void *) (aarch64_sys_regs_tlbi + i));
/* Register names and NZCV flag-name operands.  */
8480 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8481 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8482 (void *) (reg_names + i));
8484 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8485 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8486 (void *) (nzcv_names + i));
/* Shift/extend operand modifiers — hashed in both cases.  */
8488 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8490 const char *name = aarch64_operand_modifiers[i].name;
8491 checked_hash_insert (aarch64_shift_hsh, name,
8492 (void *) (aarch64_operand_modifiers + i));
8493 /* Also hash the name in the upper case. */
8494 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8495 (void *) (aarch64_operand_modifiers + i));
8498 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8501 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8502 the same condition code. */
8503 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8505 const char *name = aarch64_conds[i].names[j];
8508 checked_hash_insert (aarch64_cond_hsh, name,
8509 (void *) (aarch64_conds + i));
8510 /* Also hash the name in the upper case. */
8511 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8512 (void *) (aarch64_conds + i));
/* Barrier (DMB/DSB/ISB) options — hashed in both cases.  */
8516 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8518 const char *name = aarch64_barrier_options[i].name;
8519 /* Skip xx00 - the unallocated values of option. */
8522 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8523 (void *) (aarch64_barrier_options + i));
8524 /* Also hash the name in the upper case. */
8525 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8526 (void *) (aarch64_barrier_options + i));
/* Prefetch (PRFM) operand names — hashed in both cases.  */
8529 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8531 const char* name = aarch64_prfops[i].name;
8532 /* Skip the unallocated hint encodings. */
8535 checked_hash_insert (aarch64_pldop_hsh, name,
8536 (void *) (aarch64_prfops + i));
8537 /* Also hash the name in the upper case. */
8538 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8539 (void *) (aarch64_prfops + i));
8542 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8544 const char* name = aarch64_hint_options[i].name;
8546 checked_hash_insert (aarch64_hint_opt_hsh, name,
8547 (void *) (aarch64_hint_options + i));
8548 /* Also hash the name in the upper case. */
8549 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8550 (void *) (aarch64_hint_options + i));
8553 /* Set the cpu variant based on the command-line options. */
/* -march overrides -mcpu; with neither given, fall back to the
   configure-time default architecture.  */
8555 mcpu_cpu_opt = march_cpu_opt;
8558 mcpu_cpu_opt = &cpu_default;
8560 cpu_variant = *mcpu_cpu_opt;
8562 /* Record the CPU type. */
/* ILP32 selects a distinct BFD machine number.  */
8563 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8565 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8568 /* Command line processing. */
8570 const char *md_shortopts = "m:";
/* -EB/-EL are only meaningful when the target supports both byte
   orders (or matches the configured default endianness).  */
8572 #ifdef AARCH64_BI_ENDIAN
8573 #define OPTION_EB (OPTION_MD_BASE + 0)
8574 #define OPTION_EL (OPTION_MD_BASE + 1)
8576 #if TARGET_BYTES_BIG_ENDIAN
8577 #define OPTION_EB (OPTION_MD_BASE + 0)
8579 #define OPTION_EL (OPTION_MD_BASE + 1)
/* Long options understood by md_parse_option below.  */
8583 struct option md_longopts[] = {
8585 {"EB", no_argument, NULL, OPTION_EB},
8588 {"EL", no_argument, NULL, OPTION_EL},
8590 {NULL, no_argument, NULL, 0}
8593 size_t md_longopts_size = sizeof (md_longopts);
/* Table-driven simple flag options: each entry stores an int variable
   to set and the value to set it to when the option is seen.  */
8595 struct aarch64_option_table
8597 const char *option; /* Option name to match. */
8598 const char *help; /* Help information. */
8599 int *var; /* Variable to change. */
8600 int value; /* What to change it to. */
8601 char *deprecated; /* If non-null, print this message. */
8604 static struct aarch64_option_table aarch64_opts[] = {
8605 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
8606 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
8608 #ifdef DEBUG_AARCH64
8609 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
8610 #endif /* DEBUG_AARCH64 */
8611 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
8613 {"mno-verbose-error", N_("do not output verbose error messages"),
8614 &verbose_error_p, 0, NULL},
8615 {NULL, NULL, NULL, 0, NULL}
/* -mcpu= table: CPU name, its feature set (base architecture plus any
   implemented extensions), and an optional canonical display name.  */
8618 struct aarch64_cpu_option_table
8621 const aarch64_feature_set value;
8622 /* The canonical name of the CPU, or NULL to use NAME converted to upper
8624 const char *canonical_name;
8627 /* This list should, at a minimum, contain all the cpu names
8628 recognized by GCC. */
8629 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
8630 {"all", AARCH64_ANY, NULL},
8631 {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
8632 AARCH64_FEATURE_CRC), "Cortex-A35"},
8633 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
8634 AARCH64_FEATURE_CRC), "Cortex-A53"},
8635 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
8636 AARCH64_FEATURE_CRC), "Cortex-A57"},
8637 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
8638 AARCH64_FEATURE_CRC), "Cortex-A72"},
8639 {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
8640 AARCH64_FEATURE_CRC), "Cortex-A73"},
8641 {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8642 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
8644 {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8645 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
8647 {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8648 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
8650 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
8651 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
8652 "Samsung Exynos M1"},
8653 {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
8654 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
8655 | AARCH64_FEATURE_RDMA),
8657 {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
8658 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
8659 | AARCH64_FEATURE_RDMA),
8660 "Qualcomm QDF24XX"},
8661 {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
8662 AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
8663 "Qualcomm Saphira"},
8664 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
8665 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
8667 {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
8668 AARCH64_FEATURE_CRYPTO),
8670 /* The 'xgene-1' name is an older name for 'xgene1', which was used
8671 in earlier releases and is superseded by 'xgene1' in all
8673 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
8674 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
8675 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
8676 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
8677 {"generic", AARCH64_ARCH_V8, NULL},
8679 {NULL, AARCH64_ARCH_NONE, NULL}
/* -march= table: architecture name plus its base feature set.  */
8682 struct aarch64_arch_option_table
8685 const aarch64_feature_set value;
8688 /* This list should, at a minimum, contain all the architecture names
8689 recognized by GCC. */
8690 static const struct aarch64_arch_option_table aarch64_archs[] = {
8691 {"all", AARCH64_ANY},
8692 {"armv8-a", AARCH64_ARCH_V8},
8693 {"armv8.1-a", AARCH64_ARCH_V8_1},
8694 {"armv8.2-a", AARCH64_ARCH_V8_2},
8695 {"armv8.3-a", AARCH64_ARCH_V8_3},
8696 {"armv8.4-a", AARCH64_ARCH_V8_4},
8697 {"armv8.5-a", AARCH64_ARCH_V8_5},
8698 {NULL, AARCH64_ARCH_NONE}
8701 /* ISA extensions. */
/* +ext/+noext table: each extension carries the feature bits it
   enables (VALUE) and the features it depends on (REQUIRE).  The
   dependency sets drive the transitive enable/disable closures
   computed below.  */
8702 struct aarch64_option_cpu_value_table
8705 const aarch64_feature_set value;
8706 const aarch64_feature_set require; /* Feature dependencies. */
8709 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
8710 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
8712 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO
8713 | AARCH64_FEATURE_AES
8714 | AARCH64_FEATURE_SHA2, 0),
8715 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
8716 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
8718 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
8720 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
8721 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
8722 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
8724 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
8726 {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
8728 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
8729 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
8730 {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
8731 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
8732 {"fp16fml", AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
8733 AARCH64_FEATURE (AARCH64_FEATURE_FP
8734 | AARCH64_FEATURE_F16, 0)},
8735 {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
8737 {"sve", AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
8738 AARCH64_FEATURE (AARCH64_FEATURE_F16
8739 | AARCH64_FEATURE_SIMD
8740 | AARCH64_FEATURE_COMPNUM, 0)},
8741 {"compnum", AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
8742 AARCH64_FEATURE (AARCH64_FEATURE_F16
8743 | AARCH64_FEATURE_SIMD, 0)},
8744 {"rcpc", AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
8746 {"dotprod", AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
8748 {"sha2", AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
8750 {"aes", AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
8752 {"sm4", AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
8754 {"sha3", AARCH64_FEATURE (AARCH64_FEATURE_SHA2
8755 | AARCH64_FEATURE_SHA3, 0),
8757 {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
/* Options taking a sub-argument (-mcpu=, -march=, -mabi=): matched by
   prefix, then the tail is handed to FUNC for decoding.  */
8760 struct aarch64_long_option_table
8762 const char *option; /* Substring to match. */
8763 const char *help; /* Help information. */
8764 int (*func) (const char *subopt); /* Function to decode sub-option. */
8765 char *deprecated; /* If non-null, print this message. */
8768 /* Transitive closure of features depending on set. */
/* Given a feature SET being disabled, repeatedly add every feature
   whose dependencies intersect SET, until a fixed point is reached:
   disabling a feature also disables everything that requires it.  */
8769 static aarch64_feature_set
8770 aarch64_feature_disable_set (aarch64_feature_set set)
8772 const struct aarch64_option_cpu_value_table *opt;
8773 aarch64_feature_set prev = 0;
/* Iterate until no new dependents are picked up.  */
8775 while (prev != set) {
8777 for (opt = aarch64_features; opt->name != NULL; opt++)
8778 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
8779 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
8784 /* Transitive closure of dependencies of set. */
/* Dual of aarch64_feature_disable_set: enabling a feature also enables
   everything it requires, computed to a fixed point.  */
8785 static aarch64_feature_set
8786 aarch64_feature_enable_set (aarch64_feature_set set)
8788 const struct aarch64_option_cpu_value_table *opt;
8789 aarch64_feature_set prev = 0;
/* Iterate until no new prerequisites are picked up.  */
8791 while (prev != set) {
8793 for (opt = aarch64_features; opt->name != NULL; opt++)
8794 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
8795 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require)
;
/* Parse a "+ext1+ext2+noext3" feature string STR, updating *OPT_P to a
   freshly allocated feature set.  EXT_ONLY permits a bare extension
   name with no leading '+' (used by .arch_extension).  Returns nonzero
   on success.  NOTE(review): several lines are elided in this excerpt.  */
8801 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
8802 bfd_boolean ext_only)
8804 /* We insist on extensions being added before being removed. We achieve
8805 this by using the ADDING_VALUE variable to indicate whether we are
8806 adding an extension (1) or removing it (0) and only allowing it to
8807 change in the order -1 -> 1 -> 0. */
8808 int adding_value = -1;
8809 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
8811 /* Copy the feature set, so that we can modify it. */
8815 while (str != NULL && *str != 0)
8817 const struct aarch64_option_cpu_value_table *opt;
8818 const char *ext = NULL;
8825 as_bad (_("invalid architectural extension"));
/* Find the end of the current extension token.  */
8829 ext = strchr (++str, '+');
8835 optlen = strlen (str);
/* A "no" prefix switches to removal mode; mode may only move from
   adding (1) to removing (0), never back.  */
8837 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
8839 if (adding_value != 0)
8844 else if (optlen > 0)
8846 if (adding_value == -1)
8848 else if (adding_value != 1)
8850 as_bad (_("must specify extensions to add before specifying "
8851 "those to remove"));
8858 as_bad (_("missing architectural extension"));
8862 gas_assert (adding_value != -1);
8864 for (opt = aarch64_features; opt->name != NULL; opt++)
8865 if (strncmp (opt->name, str, optlen) == 0)
8867 aarch64_feature_set set;
8869 /* Add or remove the extension. */
/* Apply the transitive closure so dependencies follow.  */
8872 set = aarch64_feature_enable_set (opt->value);
8873 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
8877 set = aarch64_feature_disable_set (opt->value);
8878 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
8883 if (opt->name == NULL)
8885 as_bad (_("unknown architectural extension `%s'"), str);
/* Decode a -mcpu= argument: look up the CPU name (the part before any
   '+'), set mcpu_cpu_opt, then parse the trailing feature modifiers.  */
8896 aarch64_parse_cpu (const char *str)
8898 const struct aarch64_cpu_option_table *opt;
8899 const char *ext = strchr (str, '+');
8905 optlen = strlen (str);
8909 as_bad (_("missing cpu name `%s'"), str);
8913 for (opt = aarch64_cpus; opt->name != NULL; opt++)
8914 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8916 mcpu_cpu_opt = &opt->value;
8918 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
8923 as_bad (_("unknown cpu `%s'"), str);
/* Decode a -march= argument: look up the architecture name (the part
   before any '+'), set march_cpu_opt, then parse feature modifiers.  */
8928 aarch64_parse_arch (const char *str)
8930 const struct aarch64_arch_option_table *opt;
8931 const char *ext = strchr (str, '+');
8937 optlen = strlen (str);
8941 as_bad (_("missing architecture name `%s'"), str);
8945 for (opt = aarch64_archs; opt->name != NULL; opt++)
8946 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8948 march_cpu_opt = &opt->value;
8950 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
8955 as_bad (_("unknown architecture `%s'\n"), str);
/* -mabi= support: name/value table plus the decoder that sets
   aarch64_abi.  */
8960 struct aarch64_option_abi_value_table
8963 enum aarch64_abi_type value;
8966 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
8967 {"ilp32", AARCH64_ABI_ILP32},
8968 {"lp64", AARCH64_ABI_LP64},
/* Decode a -mabi= argument; diagnose empty and unknown names.  */
8972 aarch64_parse_abi (const char *str)
8978 as_bad (_("missing abi name `%s'"), str);
8982 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
8983 if (strcmp (str, aarch64_abis[i].name) == 0)
8985 aarch64_abi = aarch64_abis[i].value;
8989 as_bad (_("unknown abi `%s'\n"), str);
/* The sub-argument options handled by md_parse_option; -mabi= is only
   meaningful for ELF output.  */
8993 static struct aarch64_long_option_table aarch64_long_opts[] = {
8995 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
8996 aarch64_parse_abi, NULL},
8997 #endif /* OBJ_ELF */
8998 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
8999 aarch64_parse_cpu, NULL},
9000 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
9001 aarch64_parse_arch, NULL},
9002 {NULL, NULL, 0, NULL}
/* Handle one machine-dependent command-line option: first the -EB/-EL
   endianness switches, then the simple flag table (aarch64_opts), then
   the prefix-matched sub-argument table (aarch64_long_opts).  */
9006 md_parse_option (int c, const char *arg)
9008 struct aarch64_option_table *opt;
9009 struct aarch64_long_option_table *lopt;
9015 target_big_endian = 1;
9021 target_big_endian = 0;
9026 /* Listing option. Just ignore these, we don't support additional
/* Simple flag options: exact match on the option name.  */
9031 for (opt = aarch64_opts; opt->option != NULL; opt++)
9033 if (c == opt->option[0]
9034 && ((arg == NULL && opt->option[1] == 0)
9035 || streq (arg, opt->option + 1)))
9037 /* If the option is deprecated, tell the user. */
9038 if (opt->deprecated != NULL)
9039 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
9040 arg ? arg : "", _(opt->deprecated));
9042 if (opt->var != NULL)
9043 *opt->var = opt->value;
/* Options with a sub-argument: prefix match, then hand the tail to
   the entry's decode function.  */
9049 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9051 /* These options are expected to have an argument. */
9052 if (c == lopt->option[0]
9054 && strncmp (arg, lopt->option + 1,
9055 strlen (lopt->option + 1)) == 0)
9057 /* If the option is deprecated, tell the user. */
9058 if (lopt->deprecated != NULL)
9059 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
9060 _(lopt->deprecated));
9062 /* Call the sub-option parser. */
9063 return lopt->func (arg + strlen (lopt->option) - 1);
/* Print the AArch64-specific option help to FP, driven by the same two
   option tables used in md_parse_option.  */
9074 md_show_usage (FILE * fp)
9076 struct aarch64_option_table *opt;
9077 struct aarch64_long_option_table *lopt;
9079 fprintf (fp, _(" AArch64-specific assembler options:\n"));
9081 for (opt = aarch64_opts; opt->option != NULL; opt++)
9082 if (opt->help != NULL)
9083 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
9085 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9086 if (lopt->help != NULL)
9087 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
/* -EB/-EL are described only when compiled in (see the #ifs above).  */
9091 -EB assemble code for a big-endian cpu\n"));
9096 -EL assemble code for a little-endian cpu\n"));
9100 /* Parse a .cpu directive. */
/* Temporarily NUL-terminate the operand in the input line, look the
   CPU name up (skipping the leading "all" entry), parse any "+ext"
   suffix, and update cpu_variant.  The clobbered input character is
   restored on every exit path.  */
9103 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
9105 const struct aarch64_cpu_option_table *opt;
9111 name = input_line_pointer;
9112 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9113 input_line_pointer++;
9114 saved_char = *input_line_pointer;
9115 *input_line_pointer = 0;
9117 ext = strchr (name, '+');
9120 optlen = ext - name;
9122 optlen = strlen (name);
9124 /* Skip the first "all" entry. */
9125 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
9126 if (strlen (opt->name) == optlen
9127 && strncmp (name, opt->name, optlen) == 0)
9129 mcpu_cpu_opt = &opt->value;
9131 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9134 cpu_variant = *mcpu_cpu_opt;
9136 *input_line_pointer = saved_char;
9137 demand_empty_rest_of_line ();
/* Unknown CPU name: report, restore the input, discard the line.  */
9140 as_bad (_("unknown cpu `%s'"), name);
9141 *input_line_pointer = saved_char;
9142 ignore_rest_of_line ();
9146 /* Parse a .arch directive. */
/* Mirror of s_aarch64_cpu for architecture names: look the name up in
   aarch64_archs (skipping "all"), parse any "+ext" suffix, and update
   cpu_variant.  */
9149 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9151 const struct aarch64_arch_option_table *opt;
9157 name = input_line_pointer;
9158 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9159 input_line_pointer++;
9160 saved_char = *input_line_pointer;
9161 *input_line_pointer = 0;
9163 ext = strchr (name, '+');
9166 optlen = ext - name;
9168 optlen = strlen (name);
9170 /* Skip the first "all" entry. */
9171 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9172 if (strlen (opt->name) == optlen
9173 && strncmp (name, opt->name, optlen) == 0)
9175 mcpu_cpu_opt = &opt->value;
9177 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9180 cpu_variant = *mcpu_cpu_opt;
9182 *input_line_pointer = saved_char;
9183 demand_empty_rest_of_line ();
/* Unknown architecture: report, restore the input, discard the line.  */
9187 as_bad (_("unknown architecture `%s'\n"), name);
9188 *input_line_pointer = saved_char;
9189 ignore_rest_of_line ();
9192 /* Parse a .arch_extension directive. */
/* Parse a bare extension name (no leading '+'; note ext_only = TRUE)
   and fold it into the current cpu_variant.  */
9195 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
/* NOTE(review): stray double semicolon below — harmless, but worth
   removing in a code change.  */
9198 char *ext = input_line_pointer;;
9200 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9201 input_line_pointer++;
9202 saved_char = *input_line_pointer;
9203 *input_line_pointer = 0;
9205 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
9208 cpu_variant = *mcpu_cpu_opt;
9210 *input_line_pointer = saved_char;
9211 demand_empty_rest_of_line ();
9214 /* Copy symbol information. */
9217 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
9219 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);