int num_suffix)
{
char buffer[50];
-
+
if (num_suffix == 0)
sprintf (buffer, "__gnu_%s%s", funcname, modename);
else
sprintf (buffer, "__gnu_%s%s%d", funcname, modename, num_suffix);
-
+
set_optab_libfunc (optable, mode, buffer);
}
{
char buffer[50];
const char *maybe_suffix_2 = "";
-
+
/* Follow the logic for selecting a "2" suffix in fixed-bit.h. */
if (ALL_FIXED_POINT_MODE_P (from) && ALL_FIXED_POINT_MODE_P (to)
&& UNSIGNED_FIXED_POINT_MODE_P (from) == UNSIGNED_FIXED_POINT_MODE_P (to)
&& ALL_FRACT_MODE_P (from) == ALL_FRACT_MODE_P (to))
maybe_suffix_2 = "2";
-
+
sprintf (buffer, "__gnu_%s%s%s%s", funcname, fromname, toname,
maybe_suffix_2);
(arm_fp16_format == ARM_FP16_FORMAT_IEEE
? "__gnu_f2h_ieee"
: "__gnu_f2h_alternative"));
- set_conv_libfunc (sext_optab, SFmode, HFmode,
+ set_conv_libfunc (sext_optab, SFmode, HFmode,
(arm_fp16_format == ARM_FP16_FORMAT_IEEE
? "__gnu_h2f_ieee"
: "__gnu_h2f_alternative"));
-
+
/* Arithmetic. */
set_optab_libfunc (add_optab, HFmode, NULL);
set_optab_libfunc (sdiv_optab, HFmode, NULL);
{
tree va_list_name;
tree ap_field;
-
+
if (!TARGET_AAPCS_BASED)
return std_build_builtin_va_list ();
/* AAPCS \S 7.1.4 requires that va_list be a typedef for a type
defined as:
- struct __va_list
+ struct __va_list
{
void *__ap;
};
TYPE_STUB_DECL (va_list_type) = va_list_name;
/* Create the __ap field. */
ap_field = build_decl (BUILTINS_LOCATION,
- FIELD_DECL,
+ FIELD_DECL,
get_identifier ("__ap"),
ptr_type_node);
DECL_ARTIFICIAL (ap_field) = 1;
if (TARGET_AAPCS_BASED)
{
tree ap_field = TYPE_FIELDS (TREE_TYPE (valist));
- valist = build3 (COMPONENT_REF, TREE_TYPE (ap_field),
+ valist = build3 (COMPONENT_REF, TREE_TYPE (ap_field),
valist, ap_field, NULL_TREE);
}
/* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
static tree
-arm_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
+arm_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
gimple_seq *post_p)
{
valist = arm_extract_valist_ptr (valist);
if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
return 0;
- if (flag_pic
+ if (flag_pic
&& arm_pic_register != INVALID_REGNUM
&& df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
return 0;
/* Get the number of trailing zeros. */
lowbit = ffs((int) i) - 1;
-
+
/* Only even shifts are allowed in ARM mode so round down to the
nearest even number. */
if (TARGET_ARM)
/* Try and find a way of doing the job in either two or three
instructions.
-
+
In ARM mode we can use 8-bit constants, rotated to any 2-bit aligned
location. We start at position I. This may be the MSB, or
- optimial_immediate_sequence may have positioned it at the largest block
+ optimal_immediate_sequence may have positioned it at the largest block
of zeros that are aligned on a 2-bit boundary. We then fill up the temps,
wrapping around to the top of the word when we drop off the bottom.
In the worst case this code should produce no more than four insns.
/* Next, see if we can do a better job with a thumb2 replicated
constant.
-
+
We do it this way around to catch the cases like 0x01F001E0 where
two 8-bit immediates would work, but a replicated constant would
make it worse.
-
+
TODO: 16-bit constants that don't clear all the bits, but still win.
TODO: Arithmetic splitting for set/add/sub, rather than bitwise. */
if (TARGET_THUMB2)
|| (matching_bytes == 2
&& const_ok_for_op (remainder & ~tmp2, code))))
{
- /* At least 3 of the bytes match, and the fourth has at
+ /* At least 3 of the bytes match, and the fourth has at
least as many bits set, or two of the bytes match
and it will only require one more insn to finish. */
result = tmp2;
convert_optab_libfunc (sfloat_optab, SFmode, DImode));
add_libcall (libcall_htab,
convert_optab_libfunc (sfloat_optab, DFmode, DImode));
-
+
add_libcall (libcall_htab,
convert_optab_libfunc (ufloat_optab, SFmode, SImode));
add_libcall (libcall_htab,
(no argument is ever a candidate for a co-processor
register). */
bool base_rules = stdarg_p (type);
-
+
if (user_convention)
{
if (user_pcs > ARM_PCS_AAPCS_LOCAL)
static void
aapcs_vfp_cum_init (CUMULATIVE_ARGS *pcum ATTRIBUTE_UNUSED,
const_tree fntype ATTRIBUTE_UNUSED,
- rtx libcall ATTRIBUTE_UNUSED,
+ rtx libcall ATTRIBUTE_UNUSED,
const_tree fndecl ATTRIBUTE_UNUSED)
{
/* Record the unallocated VFP registers. */
return count;
}
-
+
case RECORD_TYPE:
{
int count = 0;
}
static bool
-aapcs_vfp_is_call_candidate (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
+aapcs_vfp_is_call_candidate (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
const_tree type)
{
if (!use_vfp_abi (pcum->pcs_variant, false))
int shift = GET_MODE_SIZE (pcum->aapcs_vfp_rmode) / GET_MODE_SIZE (SFmode);
unsigned mask = (1 << (shift * pcum->aapcs_vfp_rcount)) - 1;
int regno;
-
+
for (regno = 0; regno < NUM_VFP_ARG_REGS; regno += shift)
if (((pcum->aapcs_vfp_regs_free >> regno) & mask) == mask)
{
par = gen_rtx_PARALLEL (mode, rtvec_alloc (rcount));
for (i = 0; i < rcount; i++)
{
- rtx tmp = gen_rtx_REG (rmode,
+ rtx tmp = gen_rtx_REG (rmode,
FIRST_VFP_REGNUM + regno + i * rshift);
tmp = gen_rtx_EXPR_LIST
- (VOIDmode, tmp,
+ (VOIDmode, tmp,
GEN_INT (i * GET_MODE_SIZE (rmode)));
XVECEXP (par, 0, i) = tmp;
}
int i;
rtx par;
int shift;
-
+
aapcs_vfp_is_call_or_return_candidate (pcs_variant, mode, type,
&ag_mode, &count);
for (i = 0; i < count; i++)
{
rtx tmp = gen_rtx_REG (ag_mode, FIRST_VFP_REGNUM + i * shift);
- tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp,
+ tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp,
GEN_INT (i * GET_MODE_SIZE (ag_mode)));
XVECEXP (par, 0, i) = tmp;
}
and stops after the first match. If that entry then fails to put
the argument into a co-processor register, the argument will go on
the stack. */
-static struct
+static struct
{
/* Initialize co-processor related state in CUMULATIVE_ARGS structure. */
void (*cum_init) (CUMULATIVE_ARGS *, const_tree, rtx, const_tree);
#undef AAPCS_CP
static int
-aapcs_select_call_coproc (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
+aapcs_select_call_coproc (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
const_tree type)
{
int i;
int i;
for (i = 0; i < ARM_NUM_COPROC_SLOTS; i++)
- if (aapcs_cp_arg_layout[i].is_return_candidate (pcs_variant,
+ if (aapcs_cp_arg_layout[i].is_return_candidate (pcs_variant,
TYPE_MODE (type),
type))
return i;
anonymous argument which is on the stack. */
if (!named)
return;
-
+
/* Is this a potential co-processor register candidate? */
if (pcum->pcs_variant != ARM_PCS_AAPCS)
{
{
if (arm_libcall_uses_aapcs_base (libname))
pcum->pcs_variant = ARM_PCS_AAPCS;
-
+
pcum->aapcs_ncrn = pcum->aapcs_next_ncrn = 0;
pcum->aapcs_reg = NULL_RTX;
pcum->aapcs_partial = 0;
{
bool use_ldrd;
enum rtx_code code = GET_CODE (x);
-
+
if (arm_address_register_rtx_p (x, strict_p))
return 1;
offset = INTVAL(addend);
if (GET_MODE_SIZE (mode) <= 4)
return (offset > -256 && offset < 256);
-
+
return (use_ldrd && offset > -1024 && offset < 1024
&& (offset & 3) == 0);
}
thumb2_index_mul_operand (rtx op)
{
HOST_WIDE_INT val;
-
+
if (GET_CODE(op) != CONST_INT)
return false;
val = INTVAL(op);
return (val == 1 || val == 2 || val == 4 || val == 8);
}
-
+
/* Return nonzero if INDEX is a valid Thumb-2 address index operand. */
static int
thumb2_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
emit_insn (gen_pic_add_dot_plus_eight (reg, reg, labelno));
else
emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
-
+
*valuep = emit_library_call_value (get_tls_get_addr (), NULL_RTX,
LCT_PURE, /* LCT_CONST? */
Pmode, 1, reg, Pmode);
-
+
insns = get_insns ();
end_sequence ();
GEN_INT (!TARGET_ARM)),
UNSPEC_TLS);
rtx reg0 = load_tls_operand (sum, gen_rtx_REG (SImode, 0));
-
+
emit_insn (gen_tlscall (x, labelno));
if (!reg)
reg = gen_reg_rtx (SImode);
reg = arm_tls_descseq_addr (x, reg);
tp = arm_load_tp (NULL_RTX);
-
+
dest = gen_rtx_PLUS (Pmode, tp, reg);
}
else
reg = arm_tls_descseq_addr (x, reg);
tp = arm_load_tp (NULL_RTX);
-
+
dest = gen_rtx_PLUS (Pmode, tp, reg);
}
else
{
insns = arm_call_tls_get_addr (x, reg, &ret, TLS_LDM32);
-
+
/* Attach a unique REG_EQUIV, to allow the RTL optimizers to
share the LDM result with other LD model accesses. */
eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx),
UNSPEC_TLS);
dest = gen_reg_rtx (Pmode);
emit_libcall_block (insns, dest, ret, eqv);
-
+
/* Load the addend. */
addend = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x,
GEN_INT (TLS_LDO32)),
*total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode, 0, speed);
return true;
}
-
+
return false;
case UMIN:
{
if (GET_CODE (PATTERN (insn)) == SET)
{
- if (GET_MODE_CLASS
+ if (GET_MODE_CLASS
(GET_MODE (SET_DEST (PATTERN (insn)))) == MODE_FLOAT
- || GET_MODE_CLASS
+ || GET_MODE_CLASS
(GET_MODE (SET_SRC (PATTERN (insn)))) == MODE_FLOAT)
{
enum attr_type attr_type_insn = get_attr_type (insn);
{
/* FMACS is a special case where the dependant
instruction can be issued 3 cycles before
- the normal latency in case of an output
+ the normal latency in case of an output
dependency. */
if ((attr_type_insn == TYPE_FMACS
|| attr_type_insn == TYPE_FMACD)
It corrects the value of COST based on the relationship between
INSN and DEP through the dependence LINK. It returns the new
value. There is a per-core adjust_cost hook to adjust scheduler costs
- and the per-core hook can choose to completely override the generic
- adjust_cost function. Only put bits of code into arm_adjust_cost that
+ and the per-core hook can choose to completely override the generic
+ adjust_cost function. Only put bits of code into arm_adjust_cost that
are common across all cores. */
static int
arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
constant pool are cached, and that others will miss. This is a
hack. */
- if ((GET_CODE (src_mem) == SYMBOL_REF
+ if ((GET_CODE (src_mem) == SYMBOL_REF
&& CONSTANT_POOL_ADDRESS_P (src_mem))
|| reg_mentioned_p (stack_pointer_rtx, src_mem)
|| reg_mentioned_p (frame_pointer_rtx, src_mem)
word. */
if (recog_memoized (insn) == CODE_FOR_tlscall)
return true;
-
+
return for_each_rtx (&PATTERN (insn), arm_note_pic_base, NULL);
}
/* A compare with a shifted operand. Because of canonicalization, the
comparison will have to be swapped when we emit the assembler. */
- if (GET_MODE (y) == SImode
+ if (GET_MODE (y) == SImode
&& (REG_P (y) || (GET_CODE (y) == SUBREG))
&& (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
|| GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
/* This operation is performed swapped, but since we only rely on the Z
flag we don't need an additional mode. */
- if (GET_MODE (y) == SImode
+ if (GET_MODE (y) == SImode
&& (REG_P (y) || (GET_CODE (y) == SUBREG))
&& GET_CODE (x) == NEG
&& (op == EQ || op == NE))
still put the pool after the table. */
new_cost = arm_barrier_cost (from);
- if (count < max_count
+ if (count < max_count
&& (!selected || new_cost <= selected_cost))
{
selected = tmp;
if (TARGET_THUMB2)
thumb2_reorg ();
-
+
minipool_fix_head = minipool_fix_tail = NULL;
/* The first insn must always be a note, or the code below won't
if (count)
*count = 1;
- /* The only case when this might happen is when
+ /* The only case when this might happen is when
you are looking at the length of a DImode instruction
that has an invalid constant in it. */
if (code0 == REG && code1 != MEM)
*count = 2;
return "";
}
-
if (code0 == REG)
{
gcc_assert (TARGET_LDRD);
if (emit)
output_asm_insn ("ldr%(d%)\t%0, [%m1, #8]!", operands);
-
break;
case PRE_DEC:
break;
case POST_INC:
-
if (emit)
{
if (TARGET_LDRD)
}
if (GET_CODE (otherops[2]) == CONST_INT)
- {
+ {
if (emit)
{
if (!(const_ok_for_arm (INTVAL (otherops[2]))))
else
output_asm_insn ("add%?\t%0, %1, %2", otherops);
}
-
}
else
{
if (TARGET_LDRD)
return "ldr%(d%)\t%0, [%1]";
-
- return "ldm%(ia%)\t%1, %M0";
+
+ return "ldm%(ia%)\t%1, %M0";
}
else
{
}
if (count)
*count = 2;
-
}
}
ops[0] = XEXP (addr, 0);
ops[1] = reg;
break;
-
+
case POST_MODIFY:
/* FIXME: Not currently enabled in neon_vector_mem_operand. */
gcc_unreachable ();
}
-/* Compute the number of bytes used to store the static chain register on the
+/* Compute the number of bytes used to store the static chain register on the
stack, above the stack frame. We need to know this accurately to get the
alignment of the rest of the stack frame correct. */
then try to pop r3 instead. */
if (stack_adjust)
live_regs_mask |= 1 << 3;
-
+
if (TARGET_UNIFIED_ASM)
sprintf (instr, "ldmfd%s\t%%|sp, {", conditional);
else
/* If we have already generated the return instruction
then it is futile to generate anything else. */
- if (use_return_insn (FALSE, sibling) &&
+ if (use_return_insn (FALSE, sibling) &&
(cfun->machine->return_used_this_function != 0))
return "";
{
operands[0] = stack_pointer_rtx;
operands[1] = hard_frame_pointer_rtx;
-
+
operands[2] = GEN_INT (offsets->frame - offsets->saved_regs);
output_add_immediate (operands);
}
}
}
}
-
+
if (amount)
{
operands[1] = operands[0];
{
int reg = -1;
- /* If it is safe to use r3, then do so. This sometimes
+ /* If it is safe to use r3, then do so. This sometimes
generates better code on Thumb-2 by avoiding the need to
use 32-bit push/pop instructions. */
if (! any_sibcall_uses_r3 ()
&& TARGET_ARM)
{
rtx lr = gen_rtx_REG (SImode, LR_REGNUM);
-
+
emit_set_insn (lr, plus_constant (lr, -4));
}
if (TARGET_UNIFIED_ASM)
arm_print_condition (stream);
break;
-
+
case '.':
/* The current condition code for a condition code setting instruction.
Preceded by 's' in unified syntax, otherwise followed by 's'. */
of the target. */
align = MEM_ALIGN (x) >> 3;
memsize = MEM_SIZE (x);
-
+
/* Only certain alignment specifiers are supported by the hardware. */
if (memsize == 16 && (align % 32) == 0)
align_bits = 256;
align_bits = 64;
else
align_bits = 0;
-
+
if (align_bits != 0)
asm_fprintf (stream, ":%d", align_bits);
fprintf (stream, "d%d[%d]", regno/2, ((regno % 2) ? 2 : 0));
}
return;
-
+
default:
if (x == 0)
{
fputs (":lower16:", stream);
x = XEXP (x, 0);
}
-
+
output_addr_const (stream, x);
break;
}
if (!TARGET_AAPCS_BASED)
{
- (is_ctor ?
- default_named_section_asm_out_constructor
+ (is_ctor ?
+ default_named_section_asm_out_constructor
: default_named_section_asm_out_destructor) (symbol, priority);
return;
}
if (priority != DEFAULT_INIT_PRIORITY)
{
char buf[18];
- sprintf (buf, "%s.%.5u",
+ sprintf (buf, "%s.%.5u",
is_ctor ? ".init_array" : ".fini_array",
priority);
s = get_section (buf, SECTION_WRITE, NULL_TREE);
/* Returns the index of the ARM condition code string in
`arm_condition_codes', or ARM_NV if the comparison is invalid.
COMPARISON should be an rtx like `(eq (...) (...))'. */
+
enum arm_cond_code
maybe_get_arm_condition_code (rtx comparison)
{
if (IS_IWMMXT_REGNUM (regno))
return VALID_IWMMXT_REG_MODE (mode);
}
-
+
/* We allow almost any value to be stored in the general registers.
Restrict doubleword quantities to even register pairs so that we can
use ldrd. Do not allow very large Neon structure opaque modes in
} \
} \
while (0)
-
+
struct builtin_description
{
const unsigned int mask;
#define IWMMXT_BUILTIN(code, string, builtin) \
{ FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
ARM_BUILTIN_##builtin, UNKNOWN, 0 },
-
+
IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
-
+
#define IWMMXT_BUILTIN2(code, builtin) \
{ FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, UNKNOWN, 0 },
-
+
IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
};
-
+
static const struct builtin_description bdesc_1arg[] =
{
IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
};
-
+
/* Set up all the iWMMXt builtins. This is not called if
TARGET_IWMMXT is zero. */
= build_function_type_list (long_long_unsigned_type_node,
V4HI_type_node,V4HI_type_node,
NULL_TREE);
-
+
/* Normal vector binops. */
tree v8qi_ftype_v8qi_v8qi
= build_function_type_list (V8QI_type_node,
long_long_unsigned_type_node,
long_long_unsigned_type_node,
NULL_TREE);
-
+
/* Add all builtins that are more or less simple operations on two
operands. */
for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
return ctz_hwi (mask);
}
-/* Like emit_multi_reg_push, but allowing for a different set of
+/* Like emit_multi_reg_push, but allowing for a different set of
registers to be described as saved. MASK is the set of registers
to be saved; REAL_REGS is the set of registers to be described as
saved. If REAL_REGS is 0, only describe the stack adjustment. */
if (arm_fpu_desc->model == ARM_FP_MODEL_VFP)
{
if (TARGET_HARD_FLOAT)
- asm_fprintf (asm_out_file, "\t.eabi_attribute 27, 3\n");
+ EMIT_EABI_ATTRIBUTE (Tag_ABI_HardFP_use, 27, 3);
if (TARGET_HARD_FLOAT_ABI)
- asm_fprintf (asm_out_file, "\t.eabi_attribute 28, 1\n");
+ EMIT_EABI_ATTRIBUTE (Tag_ABI_VFP_args, 28, 1);
}
}
asm_fprintf (asm_out_file, "\t.fpu %s\n", fpu_name);
are used. However we don't have any easy way of figuring this out.
Conservatively record the setting that would have been used. */
- /* Tag_ABI_FP_rounding. */
if (flag_rounding_math)
- asm_fprintf (asm_out_file, "\t.eabi_attribute 19, 1\n");
+ EMIT_EABI_ATTRIBUTE (Tag_ABI_FP_rounding, 19, 1);
+
if (!flag_unsafe_math_optimizations)
{
- /* Tag_ABI_FP_denomal. */
- asm_fprintf (asm_out_file, "\t.eabi_attribute 20, 1\n");
- /* Tag_ABI_FP_exceptions. */
- asm_fprintf (asm_out_file, "\t.eabi_attribute 21, 1\n");
+ EMIT_EABI_ATTRIBUTE (Tag_ABI_FP_denormal, 20, 1);
+ EMIT_EABI_ATTRIBUTE (Tag_ABI_FP_exceptions, 21, 1);
}
- /* Tag_ABI_FP_user_exceptions. */
if (flag_signaling_nans)
- asm_fprintf (asm_out_file, "\t.eabi_attribute 22, 1\n");
- /* Tag_ABI_FP_number_model. */
- asm_fprintf (asm_out_file, "\t.eabi_attribute 23, %d\n",
- flag_finite_math_only ? 1 : 3);
-
- /* Tag_ABI_align8_needed. */
- asm_fprintf (asm_out_file, "\t.eabi_attribute 24, 1\n");
- /* Tag_ABI_align8_preserved. */
- asm_fprintf (asm_out_file, "\t.eabi_attribute 25, 1\n");
- /* Tag_ABI_enum_size. */
- asm_fprintf (asm_out_file, "\t.eabi_attribute 26, %d\n",
- flag_short_enums ? 1 : 2);
+ EMIT_EABI_ATTRIBUTE (Tag_ABI_FP_user_exceptions, 22, 1);
+
+ EMIT_EABI_ATTRIBUTE (Tag_ABI_FP_number_model, 23,
+ flag_finite_math_only ? 1 : 3);
+
+ EMIT_EABI_ATTRIBUTE (Tag_ABI_align8_needed, 24, 1);
+ EMIT_EABI_ATTRIBUTE (Tag_ABI_align8_preserved, 25, 1);
+ EMIT_EABI_ATTRIBUTE (Tag_ABI_enum_size, 26, flag_short_enums ? 1 : 2);
/* Tag_ABI_optimization_goals. */
if (optimize_size)
val = 1;
else
val = 6;
- asm_fprintf (asm_out_file, "\t.eabi_attribute 30, %d\n", val);
+ EMIT_EABI_ATTRIBUTE (Tag_ABI_optimization_goals, 30, val);
- /* Tag_CPU_unaligned_access. */
- asm_fprintf (asm_out_file, "\t.eabi_attribute 34, %d\n",
- unaligned_access);
+ EMIT_EABI_ATTRIBUTE (Tag_CPU_unaligned_access, 34, unaligned_access);
- /* Tag_ABI_FP_16bit_format. */
if (arm_fp16_format)
- asm_fprintf (asm_out_file, "\t.eabi_attribute 38, %d\n",
- (int)arm_fp16_format);
+ EMIT_EABI_ATTRIBUTE (Tag_ABI_FP_16bit_format, 38, (int) arm_fp16_format);
if (arm_lang_output_object_attributes_hook)
arm_lang_output_object_attributes_hook();
}
- default_file_start();
+
+ default_file_start ();
}
static void
{
CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
int nregs;
-
+
cfun->machine->uses_anonymous_args = 1;
if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL)
{
}
else
nregs = pcum->nregs;
-
+
if (nregs < NUM_ARG_REGS)
*pretend_size = (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
}
}
/* Implement TARGET_CLASS_LIKELY_SPILLED_P.
-
+
We need to define this for LO_REGS on Thumb-1. Otherwise we can end up
using r0-r4 for function arguments, r7 for the stack frame and don't have
enough left over to do doubleword arithmetic. For Thumb-2 all the
const char *shift;
HOST_WIDE_INT val;
char c;
-
+
c = flag_chars[set_flags];
if (TARGET_UNIFIED_ASM)
{
switch (GET_MODE(diff_vec))
{
case QImode:
- return (ADDR_DIFF_VEC_FLAGS (diff_vec).offset_unsigned ?
+ return (ADDR_DIFF_VEC_FLAGS (diff_vec).offset_unsigned ?
"bl\t%___gnu_thumb1_case_uqi" : "bl\t%___gnu_thumb1_case_sqi");
case HImode:
- return (ADDR_DIFF_VEC_FLAGS (diff_vec).offset_unsigned ?
+ return (ADDR_DIFF_VEC_FLAGS (diff_vec).offset_unsigned ?
"bl\t%___gnu_thumb1_case_uhi" : "bl\t%___gnu_thumb1_case_shi");
case SImode:
return "bl\t%___gnu_thumb1_case_si";
/* The ARM ABI documents (10th October 2008) say that "__va_list"
has to be managled as if it is in the "std" namespace. */
- if (TARGET_AAPCS_BASED
+ if (TARGET_AAPCS_BASED
&& lang_hooks.types_compatible_p (CONST_CAST_TREE (type), va_list_type))
{
static bool warned;
packed access. */
return ((misalignment % align) == 0);
}
-
+
return default_builtin_support_vector_misalignment (mode, type, misalignment,
is_packed);
}