Enable fusion of dependent AESE; AESMC and AESD; AESIMC instruction
pairs in the scheduler. This can give up to 2x speedup on many AArch64
implementations. Also model the crypto instructions on Cortex-A57
according to the Optimization Guide.
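
For illustration only (not part of the patch): a dependent AESE/AESMC
pair as it typically appears when written with the ACLE intrinsics.
The function name and compile flags below are examples, not taken from
this commit.

  #include <arm_neon.h>

  /* One AES encryption round: AESE performs AddRoundKey, SubBytes and
     ShiftRows; AESMC performs MixColumns on the AESE result.  This
     back-to-back dependent pair is exactly what the scheduler can now
     keep adjacent so the core can fuse it.  */
  uint8x16_t
  aes_round (uint8x16_t state, uint8x16_t round_key)
  {
    state = vaeseq_u8 (state, round_key);
    return vaesmcq_u8 (state);
  }

Built with e.g. -march=armv8-a+crypto -mcpu=cortex-a57, this should
produce an aese immediately followed by an aesmc on the same register,
which is the pattern the fusion targets.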
gcc/
* config/aarch64/aarch64.c (cortexa53_tunings): Enable AES fusion.
(cortexa57_tunings): Likewise.
(cortexa72_tunings): Likewise.
(arch_macro_fusion_pair_p): Add support for AES fusion.
* config/aarch64/aarch64-fusion-pairs.def: Add AES_AESMC entry.
* config/arm/aarch-common.c (aarch_crypto_can_dual_issue):
Allow virtual registers before reload so early scheduling works.
* config/arm/cortex-a57.md (cortex_a57_crypto_simple): Use
correct latency and pipeline.
(cortex_a57_crypto_complex): Likewise.
(cortex_a57_crypto_xor): Likewise.
(define_bypass): Add AES bypass.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@233268 138bc75d-0d04-0410-961f-82ee72b054a4
+2016-02-10 Wilco Dijkstra <wdijkstr@arm.com>
+
+ * config/aarch64/aarch64.c (cortexa53_tunings): Enable AES fusion.
+ (cortexa57_tunings): Likewise.
+ (cortexa72_tunings): Likewise.
+ (arch_macro_fusion_pair_p): Add support for AES fusion.
+ * config/aarch64/aarch64-fusion-pairs.def: Add AES_AESMC entry.
+ * config/arm/aarch-common.c (aarch_crypto_can_dual_issue):
+ Allow virtual registers before reload so early scheduling works.
+ * config/arm/cortex-a57.md (cortex_a57_crypto_simple): Use
+ correct latency and pipeline.
+ (cortex_a57_crypto_complex): Likewise.
+ (cortex_a57_crypto_xor): Likewise.
+ (define_bypass): Add AES bypass.
+
2016-02-10 Richard Biener <rguenther@suse.de>
PR tree-optimization/69726
AARCH64_FUSION_PAIR ("movk+movk", MOVK_MOVK)
AARCH64_FUSION_PAIR ("adrp+ldr", ADRP_LDR)
AARCH64_FUSION_PAIR ("cmp+branch", CMP_BRANCH)
+AARCH64_FUSION_PAIR ("aes+aesmc", AES_AESMC)
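
(Context, not part of the diff: entries in aarch64-fusion-pairs.def are
consumed through GCC's usual X-macro idiom, which turns each entry into
an index and a one-bit mask that the per-CPU tunings OR into
fusible_ops.  A minimal standalone sketch of that idiom follows; the
names and the main function are simplified placeholders, not the real
aarch64 headers.)

  #include <stdio.h>

  /* Each PAIR entry yields an index constant FUSE_<name>_index and a
     mask constant FUSE_<name>.  */
  #define FUSION_PAIRS \
    PAIR (MOV_MOVK)    \
    PAIR (ADRP_ADD)    \
    PAIR (MOVK_MOVK)   \
    PAIR (ADRP_LDR)    \
    PAIR (CMP_BRANCH)  \
    PAIR (AES_AESMC)

  #define PAIR(name) FUSE_##name##_index,
  enum fusion_index { FUSION_PAIRS FUSE_index_END };
  #undef PAIR

  #define PAIR(name) FUSE_##name = 1u << FUSE_##name##_index,
  enum fusion_mask { FUSE_NOTHING = 0, FUSION_PAIRS };
  #undef PAIR

  int
  main (void)
  {
    /* Mirrors the cortexa53 fusible_ops value in the hunk below.  */
    unsigned fusible_ops = FUSE_AES_AESMC | FUSE_MOV_MOVK
                           | FUSE_ADRP_ADD | FUSE_MOVK_MOVK
                           | FUSE_ADRP_LDR;
    printf ("fusible_ops = 0x%02x\n", fusible_ops);
    return 0;
  }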
&generic_branch_cost,
4, /* memmov_cost */
2, /* issue_rate */
- (AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_ADRP_ADD
+ (AARCH64_FUSE_AES_AESMC | AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_ADRP_ADD
| AARCH64_FUSE_MOVK_MOVK | AARCH64_FUSE_ADRP_LDR), /* fusible_ops */
8, /* function_align. */
8, /* jump_align. */
&cortexa57_branch_cost,
4, /* memmov_cost */
3, /* issue_rate */
- (AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_ADRP_ADD
+ (AARCH64_FUSE_AES_AESMC | AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_ADRP_ADD
| AARCH64_FUSE_MOVK_MOVK), /* fusible_ops */
16, /* function_align. */
8, /* jump_align. */
&generic_branch_cost,
4, /* memmov_cost */
3, /* issue_rate */
- (AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_ADRP_ADD
+ (AARCH64_FUSE_AES_AESMC | AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_ADRP_ADD
| AARCH64_FUSE_MOVK_MOVK), /* fusible_ops */
16, /* function_align. */
8, /* jump_align. */
}
}
+ if ((aarch64_tune_params.fusible_ops & AARCH64_FUSE_AES_AESMC)
+ && aarch_crypto_can_dual_issue (prev, curr))
+ return true;
+
if ((aarch64_tune_params.fusible_ops & AARCH64_FUSE_CMP_BRANCH)
&& any_condjump_p (curr))
{
{
unsigned int regno = REGNO (SET_DEST (producer_set));
- return REGNO (SET_DEST (consumer_set)) == regno
- && REGNO (XVECEXP (consumer_src, 0, 0)) == regno;
+ /* Before reload the registers are virtual, so the destination of
+ consumer_set doesn't need to match. */
+
+ return (REGNO (SET_DEST (consumer_set)) == regno || !reload_completed)
+ && REGNO (XVECEXP (consumer_src, 0, 0)) == regno;
}
return 0;
neon_fp_sqrt_s_q, neon_fp_sqrt_d_q"))
"ca57_cx2_block*3")
-(define_insn_reservation "cortex_a57_crypto_simple" 4
+(define_insn_reservation "cortex_a57_crypto_simple" 3
(and (eq_attr "tune" "cortexa57")
(eq_attr "type" "crypto_aese,crypto_aesmc,crypto_sha1_fast,crypto_sha256_fast"))
- "ca57_cx2")
+ "ca57_cx1")
-(define_insn_reservation "cortex_a57_crypto_complex" 7
+(define_insn_reservation "cortex_a57_crypto_complex" 6
(and (eq_attr "tune" "cortexa57")
(eq_attr "type" "crypto_sha1_slow,crypto_sha256_slow"))
- "ca57_cx2+(ca57_cx2_issue,ca57_cx2)")
+ "ca57_cx1*2")
-(define_insn_reservation "cortex_a57_crypto_xor" 7
+(define_insn_reservation "cortex_a57_crypto_xor" 6
(and (eq_attr "tune" "cortexa57")
(eq_attr "type" "crypto_sha1_xor"))
- "(ca57_cx1+ca57_cx2)")
+ "(ca57_cx1*2)|(ca57_cx2*2)")
;; We lie with calls. They take up all issue slots, but are otherwise
;; not harmful.
(define_bypass 1 "cortex_a57_*"
"cortex_a57_call,cortex_a57_branch")
+;; AESE+AESMC and AESD+AESIMC pairs forward with zero latency
+(define_bypass 0 "cortex_a57_crypto_simple"
+ "cortex_a57_crypto_simple"
+ "aarch_crypto_can_dual_issue")
+
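
(Illustrative, not part of the patch: with the zero-cycle bypass the
scheduler knows that a dependent AESE+AESMC pair completes without
extra latency, so it no longer tries to pull unrelated work in between
the two instructions.  A hypothetical multi-round helper that benefits
from this:)

  #include <arm_neon.h>

  /* Run nrounds AES rounds back to back; each iteration emits a
     dependent AESE+AESMC pair that can issue as a fused unit.  */
  static inline uint8x16_t
  aes_rounds (uint8x16_t state, const uint8x16_t *keys, int nrounds)
  {
    for (int i = 0; i < nrounds; i++)
      {
        state = vaeseq_u8 (state, keys[i]);
        state = vaesmcq_u8 (state);
      }
    return state;
  }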