This patch adds two new patterns for the VLSTM and VLLDM instructions.
cmse_nonsecure_call_inline_register_clear is then modified to
generate VLSTM and VLLDM respectively before and after calls to
functions with the cmse_nonsecure_call attribute in order to have lazy
saving, clearing and restoring of VFP registers. Since these
instructions do not do writeback of the base register, the stack is adjusted
prior to the lazy store and after the lazy load, with appropriate frame
debug notes to describe the effect on the CFA register.
As with CLRM, VSCCLRM and VSTR/VLDR, the instruction is modeled as an
unspecified operation to the memory pointed to by the base register.
*** gcc/ChangeLog ***
2020-01-16 Mihail-Calin Ionescu <mihail.ionescu@arm.com>
2020-01-16 Thomas Preud'homme <thomas.preudhomme@arm.com>
* config/arm/arm.c (arm_add_cfa_adjust_cfa_note): Declare early.
(cmse_nonsecure_call_inline_register_clear): Define new lazy_fpclear
variable as true when floating-point ABI is not hard. Replace
check against TARGET_HARD_FLOAT_ABI by checks against lazy_fpclear.
Generate VLSTM and VLLDM instruction respectively before and
after a function call to cmse_nonsecure_call function.
* config/arm/unspecs.md (VUNSPEC_VLSTM): Define unspec.
(VUNSPEC_VLLDM): Likewise.
* config/arm/vfp.md (lazy_store_multiple_insn): New define_insn.
(lazy_load_multiple_insn): Likewise.
*** gcc/testsuite/ChangeLog ***
2020-01-16 Mihail-Calin Ionescu <mihail.ionescu@arm.com>
2020-01-16 Thomas Preud'homme <thomas.preudhomme@arm.com>
* gcc.target/arm/cmse/mainline/8_1m/soft/cmse-13.c: Add check for VLSTM and
VLLDM.
* gcc.target/arm/cmse/mainline/8_1m/soft/cmse-7.c: Likewise.
* gcc.target/arm/cmse/mainline/8_1m/soft/cmse-8.c: Likewise.
* gcc.target/arm/cmse/mainline/8_1m/softfp/cmse-13.c: Likewise.
* gcc.target/arm/cmse/mainline/8_1m/softfp/cmse-7.c: Likewise.
* gcc.target/arm/cmse/mainline/8_1m/softfp/cmse-8.c: Likewise.
* gcc.target/arm/cmse/mainline/8_1m/softfp-sp/cmse-7.c: Likewise.
* gcc.target/arm/cmse/mainline/8_1m/softfp-sp/cmse-8.c: Likewise.
2020-01-16 Mihail-Calin Ionescu <mihail.ionescu@arm.com>
2020-01-16 Thomas Preud'homme <thomas.preudhomme@arm.com>
+ * config/arm/arm.c (arm_add_cfa_adjust_cfa_note): Declare early.
+ (cmse_nonsecure_call_inline_register_clear): Define new lazy_fpclear
+ variable as true when floating-point ABI is not hard. Replace
+ check against TARGET_HARD_FLOAT_ABI by checks against lazy_fpclear.
+ Generate VLSTM and VLLDM instruction respectively before and
+ after a function call to cmse_nonsecure_call function.
+ * config/arm/unspecs.md (VUNSPEC_VLSTM): Define unspec.
+ (VUNSPEC_VLLDM): Likewise.
+ * config/arm/vfp.md (lazy_store_multiple_insn): New define_insn.
+ (lazy_load_multiple_insn): Likewise.
+
+2020-01-16 Mihail-Calin Ionescu <mihail.ionescu@arm.com>
+2020-01-16 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
* config/arm/arm.c (vfp_emit_fstmd): Declare early.
(arm_emit_vfp_multi_reg_pop): Likewise.
(cmse_nonsecure_call_inline_register_clear): Abstract number of VFP
static int arm_memory_move_cost (machine_mode, reg_class_t, bool);
static void emit_constant_insn (rtx cond, rtx pattern);
static rtx_insn *emit_set_insn (rtx, rtx);
+static void arm_add_cfa_adjust_cfa_note (rtx, int, rtx, rtx);
static rtx emit_multi_reg_push (unsigned long, unsigned long);
static void arm_emit_multi_reg_pop (unsigned long);
static int vfp_emit_fstmd (int, int);
FOR_BB_INSNS (bb, insn)
{
bool clear_callee_saved = TARGET_HAVE_FPCXT_CMSE;
+ /* frame = VFP regs + FPSCR + VPR. */
+ unsigned lazy_store_stack_frame_size
+ = (LAST_VFP_REGNUM - FIRST_VFP_REGNUM + 1 + 2) * UNITS_PER_WORD;
unsigned long callee_saved_mask
= ((1 << (LAST_HI_REGNUM + 1)) - 1)
& ~((1 << (LAST_ARG_REGNUM + 1)) - 1);
CUMULATIVE_ARGS args_so_far_v;
cumulative_args_t args_so_far;
tree arg_type, fntype;
- bool first_param = true;
+ bool first_param = true, lazy_fpclear = !TARGET_HARD_FLOAT_ABI;
function_args_iterator args_iter;
uint32_t padding_bits_to_clear[4] = {0U, 0U, 0U, 0U};
-mfloat-abi=hard. For -mfloat-abi=softfp we will be using the
lazy store and loads which clear both caller- and callee-saved
registers. */
- if (TARGET_HARD_FLOAT_ABI)
+ if (!lazy_fpclear)
{
auto_sbitmap float_bitmap (maxregno + 1);
disabled for pop (see below). */
RTX_FRAME_RELATED_P (push_insn) = 0;
+ /* Lazy store multiple. */
+ if (lazy_fpclear)
+ {
+ rtx imm;
+ rtx_insn *add_insn;
+
+ imm = gen_int_mode (- lazy_store_stack_frame_size, SImode);
+ add_insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
+ stack_pointer_rtx, imm));
+ arm_add_cfa_adjust_cfa_note (add_insn,
+ - lazy_store_stack_frame_size,
+ stack_pointer_rtx,
+ stack_pointer_rtx);
+ emit_insn (gen_lazy_store_multiple_insn (stack_pointer_rtx));
+ }
/* Save VFP callee-saved registers. */
- if (TARGET_HARD_FLOAT_ABI)
+ else
{
vfp_emit_fstmd (D7_VFP_REGNUM + 1,
(max_fp_regno - D7_VFP_REGNUM) / 2);
start_sequence ();
+ /* Lazy load multiple done as part of libcall in Armv8-M. */
+ if (lazy_fpclear)
+ {
+ rtx imm = gen_int_mode (lazy_store_stack_frame_size, SImode);
+ emit_insn (gen_lazy_load_multiple_insn (stack_pointer_rtx));
+ rtx_insn *add_insn =
+ emit_insn (gen_addsi3 (stack_pointer_rtx,
+ stack_pointer_rtx, imm));
+ arm_add_cfa_adjust_cfa_note (add_insn,
+ lazy_store_stack_frame_size,
+ stack_pointer_rtx,
+ stack_pointer_rtx);
+ }
/* Restore VFP callee-saved registers. */
- if (TARGET_HARD_FLOAT_ABI)
+ else
{
int nb_callee_saved_vfp_regs =
(max_fp_regno - D7_VFP_REGNUM) / 2;
VUNSPEC_CLRM_APSR ; Represent the clearing of APSR with clrm instruction.
VUNSPEC_VSCCLRM_VPR ; Represent the clearing of VPR with vscclrm
; instruction.
+ VUNSPEC_VLSTM ; Represent the lazy store multiple with vlstm
+ ; instruction.
+ VUNSPEC_VLLDM ; Represent the lazy load multiple with vlldm
+ ; instruction.
])
;; Enumerators for NEON unspecs.
(set_attr "type" "mov_reg")]
)
+;; Lazy store multiple (VLSTM): lazily save the VFP state (VFP registers,
+;; FPSCR and VPR) to the memory at the address in operand 0.  VLSTM does
+;; not write back the base register, so the pattern's post_dec on operand 0
+;; only models the instruction's effect on the pointed-to stack area; the
+;; caller adjusts the stack pointer separately (with a frame debug note).
+;; As with CLRM/VSCCLRM, the operation on memory is an unspec_volatile so
+;; it cannot be reordered or deleted.
+(define_insn "lazy_store_multiple_insn"
+  [(set (match_operand:SI 0 "s_register_operand" "+&rk")
+	(post_dec:SI (match_dup 0)))
+   (unspec_volatile [(const_int 0)
+		     (mem:SI (post_dec:SI (match_dup 0)))]
+		    VUNSPEC_VLSTM)]
+  "use_cmse && reload_completed"
+  "vlstm%?\\t%0"
+  [(set_attr "predicable" "yes")
+   (set_attr "type" "store_4")]
+)
+
+;; Lazy load multiple (VLLDM): lazily restore the VFP state saved by a
+;; preceding VLSTM from the memory at the address in operand 0.  VLLDM
+;; does not write back the base register; the stack pointer is adjusted
+;; after this insn by the caller (with a frame debug note).  The memory
+;; read is modeled as an unspec_volatile to keep it ordered with respect
+;; to the matching lazy store.
+(define_insn "lazy_load_multiple_insn"
+  [(set (match_operand:SI 0 "s_register_operand" "+&rk")
+	(post_inc:SI (match_dup 0)))
+   (unspec_volatile:SI [(const_int 0)
+			(mem:SI (match_dup 0))]
+		       VUNSPEC_VLLDM)]
+  "use_cmse && reload_completed"
+  "vlldm%?\\t%0"
+  [(set_attr "predicable" "yes")
+   (set_attr "type" "load_4")]
+)
+
(define_insn_and_split "*cmpsf_split_vfp"
[(set (reg:CCFP CC_REGNUM)
(compare:CCFP (match_operand:SF 0 "s_register_operand" "t")
2020-01-16 Mihail-Calin Ionescu <mihail.ionescu@arm.com>
2020-01-16 Thomas Preud'homme <thomas.preudhomme@arm.com>
+ * gcc.target/arm/cmse/mainline/8_1m/soft/cmse-13.c: Add check for VLSTM and
+ VLLDM.
+ * gcc.target/arm/cmse/mainline/8_1m/soft/cmse-7.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/soft/cmse-8.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/softfp/cmse-13.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/softfp/cmse-7.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/softfp/cmse-8.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/softfp-sp/cmse-7.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/softfp-sp/cmse-8.c: Likewise.
+
+2020-01-16 Mihail-Calin Ionescu <mihail.ionescu@arm.com>
+2020-01-16 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
* gcc.target/arm/cmse/mainline/8_1m/hard-sp/cmse-13.c: Add check for
VPUSH and VPOP and update expectation for VSCCLRM.
* gcc.target/arm/cmse/mainline/8_1m/hard-sp/cmse-7.c: Likewise.
/* { dg-final { scan-assembler-not "mov\tr2, r4" } } */
/* { dg-final { scan-assembler-not "mov\tr3, r4" } } */
/* { dg-final { scan-assembler "push\t\{r4, r5, r6, r7, r8, r9, r10, fp\}" } } */
+/* { dg-final { scan-assembler "vlstm\tsp" } } */
/* { dg-final { scan-assembler "clrm\t\{r1, r5, r6, r7, r8, r9, r10, fp, ip, APSR\}" } } */
+/* { dg-final { scan-assembler "vlldm\tsp" } } */
/* { dg-final { scan-assembler "pop\t\{r4, r5, r6, r7, r8, r9, r10, fp\}" } } */
/* { dg-final { scan-assembler-not "vmov" } } */
/* { dg-final { scan-assembler-not "vmsr" } } */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "push\t\{r4, r5, r6, r7, r8, r9, r10, fp\}" } } */
+/* { dg-final { scan-assembler "vlstm\tsp" } } */
/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, r5, r6, r7, r8, r9, r10, fp, ip, APSR\}" } } */
+/* { dg-final { scan-assembler "vlldm\tsp" } } */
/* { dg-final { scan-assembler "pop\t\{r4, r5, r6, r7, r8, r9, r10, fp\}" } } */
/* { dg-final { scan-assembler-not "vmov" } } */
/* { dg-final { scan-assembler-not "vmsr" } } */
/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */
/* { dg-final { scan-assembler-not "mov\tr1, r4" } } */
/* { dg-final { scan-assembler "push\t\{r4, r5, r6, r7, r8, r9, r10, fp\}" } } */
+/* { dg-final { scan-assembler "vlstm\tsp" } } */
/* { dg-final { scan-assembler "clrm\t\{r2, r3, r5, r6, r7, r8, r9, r10, fp, ip, APSR\}" } } */
+/* { dg-final { scan-assembler "vlldm\tsp" } } */
/* { dg-final { scan-assembler "pop\t\{r4, r5, r6, r7, r8, r9, r10, fp\}" } } */
/* { dg-final { scan-assembler-not "vmov" } } */
/* { dg-final { scan-assembler-not "vmsr" } } */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "push\t\{r4, r5, r6, r7, r8, r9, r10, fp\}" } } */
+/* { dg-final { scan-assembler "vlstm\tsp" } } */
/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, r5, r6, r7, r8, r9, r10, fp, ip, APSR\}" } } */
+/* { dg-final { scan-assembler "vlldm\tsp" } } */
/* { dg-final { scan-assembler "pop\t\{r4, r5, r6, r7, r8, r9, r10, fp\}" } } */
/* Now we check that we use the correct intrinsic to call. */
/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */
/* { dg-final { scan-assembler-not "mov\tr1, r4" } } */
/* { dg-final { scan-assembler "push\t\{r4, r5, r6, r7, r8, r9, r10, fp\}" } } */
+/* { dg-final { scan-assembler "vlstm\tsp" } } */
/* { dg-final { scan-assembler "clrm\t\{r2, r3, r5, r6, r7, r8, r9, r10, fp, ip, APSR\}" } } */
+/* { dg-final { scan-assembler "vlldm\tsp" } } */
/* { dg-final { scan-assembler "pop\t\{r4, r5, r6, r7, r8, r9, r10, fp\}" } } */
/* Now we check that we use the correct intrinsic to call. */
/* { dg-final { scan-assembler-not "mov\tr2, r4" } } */
/* { dg-final { scan-assembler-not "mov\tr3, r4" } } */
/* { dg-final { scan-assembler "push\t\{r4, r5, r6, r7, r8, r9, r10, fp\}" } } */
+/* { dg-final { scan-assembler "vlstm\tsp" } } */
/* { dg-final { scan-assembler "clrm\t\{r1, r5, r6, r7, r8, r9, r10, fp, ip, APSR\}" } } */
+/* { dg-final { scan-assembler "vlldm\tsp" } } */
/* { dg-final { scan-assembler "pop\t\{r4, r5, r6, r7, r8, r9, r10, fp\}" } } */
/* Now we check that we use the correct intrinsic to call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "push\t\{r4, r5, r6, r7, r8, r9, r10, fp\}" } } */
+/* { dg-final { scan-assembler "vlstm\tsp" } } */
/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, r5, r6, r7, r8, r9, r10, fp, ip, APSR\}" } } */
+/* { dg-final { scan-assembler "vlldm\tsp" } } */
/* { dg-final { scan-assembler "pop\t\{r4, r5, r6, r7, r8, r9, r10, fp\}" } } */
/* Now we check that we use the correct intrinsic to call. */
/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */
/* { dg-final { scan-assembler-not "mov\tr1, r4" } } */
/* { dg-final { scan-assembler "push\t\{r4, r5, r6, r7, r8, r9, r10, fp\}" } } */
+/* { dg-final { scan-assembler "vlstm\tsp" } } */
/* { dg-final { scan-assembler "clrm\t\{r2, r3, r5, r6, r7, r8, r9, r10, fp, ip, APSR\}" } } */
+/* { dg-final { scan-assembler "vlldm\tsp" } } */
/* { dg-final { scan-assembler "pop\t\{r4, r5, r6, r7, r8, r9, r10, fp\}" } } */
/* Now we check that we use the correct intrinsic to call. */