amdgcn: Add builtins for vectorized native versions of abs, floorf and floor
author     Kwok Cheung Yeung <kcy@codesourcery.com>
           Tue, 8 Nov 2022 11:59:58 +0000 (11:59 +0000)
committer  Kwok Cheung Yeung <kcy@codesourcery.com>
           Tue, 8 Nov 2022 14:52:11 +0000 (14:52 +0000)
2022-11-08  Kwok Cheung Yeung  <kcy@codesourcery.com>

gcc/
* config/gcn/gcn-builtins.def (FABSV, FLOORVF, FLOORV): New builtins.
* config/gcn/gcn.cc (gcn_expand_builtin_1): Expand GCN_BUILTIN_FABSV,
GCN_BUILTIN_FLOORVF and GCN_BUILTIN_FLOORV.

gcc/testsuite/
* gcc.target/gcn/math-builtins-1.c: New test.

gcc/config/gcn/gcn-builtins.def
gcc/config/gcn/gcn.cc
gcc/testsuite/gcc.target/gcn/math-builtins-1.c [new file with mode: 0644]
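
A minimal usage sketch (not part of the patch) of how the new builtins are expected to be called from amdgcn target code, mirroring the new test below; the typedefs and function names are illustrative only, chosen to match the 64-lane V64SF/V64DF modes the builtins operate on:

/* Illustrative sketch only; assumes an amdgcn target.  */
typedef float  v64sf __attribute__ ((vector_size (256)));
typedef double v64df __attribute__ ((vector_size (512)));

v64sf floor_sp (v64sf x)
{
  /* Expands via floorv64sf2 (v_floor_f32).  */
  return __builtin_gcn_floorvf (x);
}

v64df abs_floor_dp (v64df x)
{
  /* Expands via absv64df2 (v_add_f64 ..., 0, |src|).  */
  x = __builtin_gcn_fabsv (x);
  /* Expands via floorv64df2 (v_floor_f64).  */
  return __builtin_gcn_floorv (x);
}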

diff --git a/gcc/config/gcn/gcn-builtins.def b/gcc/config/gcn/gcn-builtins.def
index 2769190..c50777b 100644
--- a/gcc/config/gcn/gcn-builtins.def
+++ b/gcc/config/gcn/gcn-builtins.def
@@ -64,6 +64,21 @@ DEF_BUILTIN (FABSVF, 3 /*CODE_FOR_fabsvf */,
             _A2 (GCN_BTI_V64SF, GCN_BTI_V64SF),
             gcn_expand_builtin_1)
 
+DEF_BUILTIN (FABSV, 3 /*CODE_FOR_fabsv */,
+            "fabsv", B_INSN,
+            _A2 (GCN_BTI_V64DF, GCN_BTI_V64DF),
+            gcn_expand_builtin_1)
+
+DEF_BUILTIN (FLOORVF, 3 /*CODE_FOR_floorvf */,
+            "floorvf", B_INSN,
+            _A2 (GCN_BTI_V64SF, GCN_BTI_V64SF),
+            gcn_expand_builtin_1)
+
+DEF_BUILTIN (FLOORV, 3 /*CODE_FOR_floorv */,
+            "floorv", B_INSN,
+            _A2 (GCN_BTI_V64DF, GCN_BTI_V64DF),
+            gcn_expand_builtin_1)
+
 DEF_BUILTIN (LDEXPVF, 3 /*CODE_FOR_ldexpvf */,
             "ldexpvf", B_INSN,
             _A3 (GCN_BTI_V64SF, GCN_BTI_V64SF, GCN_BTI_V64SI),
diff --git a/gcc/config/gcn/gcn.cc b/gcc/config/gcn/gcn.cc
index 1996115..9c5e341 100644
--- a/gcc/config/gcn/gcn.cc
+++ b/gcc/config/gcn/gcn.cc
@@ -4329,6 +4329,39 @@ gcn_expand_builtin_1 (tree exp, rtx target, rtx /*subtarget */ ,
        emit_insn (gen_absv64sf2 (target, arg));
        return target;
       }
+    case GCN_BUILTIN_FABSV:
+      {
+       if (ignore)
+         return target;
+       rtx arg = force_reg (V64DFmode,
+                            expand_expr (CALL_EXPR_ARG (exp, 0), NULL_RTX,
+                                         V64DFmode,
+                                         EXPAND_NORMAL));
+       emit_insn (gen_absv64df2 (target, arg));
+       return target;
+      }
+    case GCN_BUILTIN_FLOORVF:
+      {
+       if (ignore)
+         return target;
+       rtx arg = force_reg (V64SFmode,
+                            expand_expr (CALL_EXPR_ARG (exp, 0), NULL_RTX,
+                                         V64SFmode,
+                                         EXPAND_NORMAL));
+       emit_insn (gen_floorv64sf2 (target, arg));
+       return target;
+      }
+    case GCN_BUILTIN_FLOORV:
+      {
+       if (ignore)
+         return target;
+       rtx arg = force_reg (V64DFmode,
+                            expand_expr (CALL_EXPR_ARG (exp, 0), NULL_RTX,
+                                         V64DFmode,
+                                         EXPAND_NORMAL));
+       emit_insn (gen_floorv64df2 (target, arg));
+       return target;
+      }
     case GCN_BUILTIN_LDEXPVF:
       {
        if (ignore)
diff --git a/gcc/testsuite/gcc.target/gcn/math-builtins-1.c b/gcc/testsuite/gcc.target/gcn/math-builtins-1.c
new file mode 100644
index 0000000..e1aadfb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/gcn/math-builtins-1.c
@@ -0,0 +1,33 @@
+/* { dg-do compile } */
+/* { dg-options "-O1" } */
+
+typedef float v64sf __attribute__ ((vector_size (256)));
+typedef double v64df __attribute__ ((vector_size (512)));
+typedef int v64si __attribute__ ((vector_size (256)));
+typedef long v64di __attribute__ ((vector_size (512)));
+
+v64sf f (v64sf _x, v64si _y)
+{
+  v64sf x = _x;
+  v64si y = _y;
+  x = __builtin_gcn_fabsvf (x); /* { dg-final { scan-assembler "v_add_f32\\s+v\[0-9\]+, 0, \\|v\[0-9\]+\\|" } } */
+  x = __builtin_gcn_floorvf (x); /* { dg-final { scan-assembler "v_floor_f32\\s+v\[0-9\]+, v\[0-9\]+" } } */
+  x = __builtin_gcn_frexpvf_mant (x); /* { dg-final { scan-assembler "v_frexp_mant_f32\\s+v\[0-9\]+, v\[0-9\]+" } } */
+  y = __builtin_gcn_frexpvf_exp (x); /* { dg-final { scan-assembler "v_frexp_exp_i32_f32\\s+v\[0-9\]+, v\[0-9\]+" } } */
+  x = __builtin_gcn_ldexpvf (x, y); /* { dg-final { scan-assembler "v_ldexp_f32\\s+v\[0-9\]+, v\[0-9\]+, v\[0-9\]+" } } */
+
+  return x;
+}
+
+v64df g (v64df _x, v64si _y)
+{
+  v64df x = _x;
+  v64si y = _y;
+  x = __builtin_gcn_fabsv (x); /* { dg-final { scan-assembler "v_add_f64\\s+v\\\[\[0-9\]+:\[0-9\]+\\\], 0, \\|v\\\[\[0-9\]+:\[0-9\]+\\\]\\|" } } */
+  x = __builtin_gcn_floorv (x); /* { dg-final { scan-assembler "v_floor_f64\\s+v\\\[\[0-9\]+:\[0-9\]+\\\], v\\\[\[0-9\]+:\[0-9\]+\\\]" } } */
+  x = __builtin_gcn_frexpv_mant (x); /* { dg-final { scan-assembler "v_frexp_mant_f64\\s+v\\\[\[0-9\]+:\[0-9\]+\\\], v\\\[\[0-9\]+:\[0-9\]+\\\]" } } */
+  y = __builtin_gcn_frexpv_exp (x); /* { dg-final { scan-assembler "v_frexp_exp_i32_f64\\s+v\[0-9\]+, v\\\[\[0-9\]+:\[0-9\]+\\\]" } } */
+  x = __builtin_gcn_ldexpv (x, y); /* { dg-final { scan-assembler "v_ldexp_f64\\s+v\\\[\[0-9\]+:\[0-9\]+\\\], v\\\[\[0-9\]+:\[0-9\]+\\\], v\[0-9\]+" } } */
+
+  return x;
+}