rs6000.c (rs6000_gimple_fold_builtin): Add support for gimple folding of vec_madd...
author     Will Schmidt <will_schmidt@vnet.ibm.com>
           Mon, 30 Oct 2017 14:10:17 +0000 (14:10 +0000)
committer  Will Schmidt <willschm@gcc.gnu.org>
           Mon, 30 Oct 2017 14:10:17 +0000 (14:10 +0000)
[gcc]

2017-10-30  Will Schmidt <will_schmidt@vnet.ibm.com>

* config/rs6000/rs6000.c (rs6000_gimple_fold_builtin): Add support for
gimple folding of vec_madd() intrinsics.
* config/rs6000/altivec.md (mulv8hi3): Use gen_fmav8hi4 instead of
gen_altivec_vmladduhm.  (altivec_vmladduhm): Rename to fmav8hi4.
* config/rs6000/rs6000-builtin.def: Rename altivec_vmladduhm to
fmav8hi4.

From-SVN: r254221
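
For reference, a minimal sketch of the kind of source this fold targets
(the function name is hypothetical; building it requires -maltivec):

  #include <altivec.h>

  /* vec_madd on float vectors maps to ALTIVEC_BUILTIN_VMADDFP; with this
     patch the builtin call is folded to a single FMA_EXPR assignment in
     gimple instead of surviving as an opaque call.  */
  vector float
  madd_f (vector float a, vector float b, vector float c)
  {
    return vec_madd (a, b, c);
  }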

gcc/ChangeLog
gcc/config/rs6000/altivec.md
gcc/config/rs6000/rs6000-builtin.def
gcc/config/rs6000/rs6000.c

gcc/ChangeLog
index 5468ee6..c690e33 100644
@@ -1,3 +1,11 @@
+2017-10-30  Will Schmidt  <will_schmidt@vnet.ibm.com>
+
+       * config/rs6000/rs6000.c (rs6000_gimple_fold_builtin): Add support for
+       gimple folding of vec_madd() intrinsics.
+       * config/rs6000/altivec.md (mulv8hi3): Use gen_fmav8hi4 instead of
+       gen_altivec_vmladduhm.  (altivec_vmladduhm): Rename to fmav8hi4.
+       * config/rs6000/rs6000-builtin.def: Rename altivec_vmladduhm to
+       fmav8hi4.
+
 2017-10-30  Richard Biener  <rguenther@suse.de>
 
        PR tree-optimization/82762
gcc/config/rs6000/altivec.md
index 6ea529b..b2f173d 100644
   rtx zero = gen_reg_rtx (V8HImode);
 
   emit_insn (gen_altivec_vspltish (zero, const0_rtx));
-  emit_insn (gen_altivec_vmladduhm(operands[0], operands[1], operands[2], zero));
+  emit_insn (gen_fmav8hi4 (operands[0], operands[1], operands[2], zero));
 
   DONE;
 })
 
+
 ;; Fused multiply subtract 
 (define_insn "*altivec_vnmsubfp"
   [(set (match_operand:V4SF 0 "register_operand" "=v")
   "vmhraddshs %0,%1,%2,%3"
   [(set_attr "type" "veccomplex")])
 
-(define_insn "altivec_vmladduhm"
+(define_insn "fmav8hi4"
   [(set (match_operand:V8HI 0 "register_operand" "=v")
         (plus:V8HI (mult:V8HI (match_operand:V8HI 1 "register_operand" "v")
                              (match_operand:V8HI 2 "register_operand" "v"))
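
Note: the new name follows GCC's standard fma<mode>4 optab pattern
convention, so the middle end can expand a V8HImode FMA_EXPR through this
insn directly.  A minimal sketch of source that exercises the vmladduhm
path (the function name is hypothetical; vec_mladd is the altivec.h
halfword multiply-add intrinsic):

  #include <altivec.h>

  /* vec_mladd on unsigned short vectors maps to ALTIVEC_BUILTIN_VMLADDUHM,
     which the fold in rs6000.c rewrites to FMA_EXPR; expansion then goes
     through the renamed fmav8hi4 insn.  */
  vector unsigned short
  madd_uh (vector unsigned short a, vector unsigned short b,
           vector unsigned short c)
  {
    return vec_mladd (a, b, c);
  }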
gcc/config/rs6000/rs6000-builtin.def
index 9804675..c8a425c 100644
@@ -961,7 +961,7 @@ BU_SPECIAL_X (RS6000_BUILTIN_NONE, NULL, 0, RS6000_BTC_MISC)
 BU_ALTIVEC_3 (VMADDFP,        "vmaddfp",        FP,            fmav4sf4)
 BU_ALTIVEC_3 (VMHADDSHS,      "vmhaddshs",      SAT,           altivec_vmhaddshs)
 BU_ALTIVEC_3 (VMHRADDSHS,     "vmhraddshs",     SAT,           altivec_vmhraddshs)
-BU_ALTIVEC_3 (VMLADDUHM,      "vmladduhm",      CONST,         altivec_vmladduhm)
+BU_ALTIVEC_3 (VMLADDUHM,      "vmladduhm",      CONST,         fmav8hi4)
 BU_ALTIVEC_3 (VMSUMUBM,       "vmsumubm",       CONST,         altivec_vmsumubm)
 BU_ALTIVEC_3 (VMSUMMBM,       "vmsummbm",       CONST,         altivec_vmsummbm)
 BU_ALTIVEC_3 (VMSUMUHM,       "vmsumuhm",       CONST,         altivec_vmsumuhm)
gcc/config/rs6000/rs6000.c
index 3162d52..182dc30 100644
@@ -16647,6 +16647,22 @@ rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
         gsi_replace (gsi, g, true);
         return true;
       }
+
+    /* Vector Fused multiply-add (fma).  */
+    case ALTIVEC_BUILTIN_VMADDFP:
+    case VSX_BUILTIN_XVMADDDP:
+    case ALTIVEC_BUILTIN_VMLADDUHM:
+      {
+       arg0 = gimple_call_arg (stmt, 0);
+       arg1 = gimple_call_arg (stmt, 1);
+       tree arg2 = gimple_call_arg (stmt, 2);
+       lhs = gimple_call_lhs (stmt);
+       gimple *g = gimple_build_assign (lhs, FMA_EXPR, arg0, arg1, arg2);
+       gimple_set_location (g, gimple_location (stmt));
+       gsi_replace (gsi, g, true);
+       return true;
+      }
+
     default:
        if (TARGET_DEBUG_BUILTIN)
           fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
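
After this fold, each of the three builtins handled above becomes one
ternary gimple assignment of the form lhs = FMA_EXPR <arg0, arg1, arg2>
(a sketch of the idea, not literal dump output).  A doubleword example
(the function name is hypothetical; building it requires -mvsx):

  #include <altivec.h>

  /* vec_madd on double vectors maps to VSX_BUILTIN_XVMADDDP, which is
     likewise folded to FMA_EXPR at gimple time.  */
  vector double
  madd_d (vector double a, vector double b, vector double c)
  {
    return vec_madd (a, b, c);
  }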