gcc/ChangeLog
author: ktkachov <ktkachov@138bc75d-0d04-0410-961f-82ee72b054a4>
Mon, 10 Dec 2012 11:09:12 +0000 (11:09 +0000)
committer: ktkachov <ktkachov@138bc75d-0d04-0410-961f-82ee72b054a4>
Mon, 10 Dec 2012 11:09:12 +0000 (11:09 +0000)
2012-12-10  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>

* config/arm/neon.ml (opcode): Add Vrintn, Vrinta, Vrintp, Vrintm,
Vrintz to type.
(type features): Add Requires_arch type constructor.
(ops): Define Vrintn, Vrinta, Vrintp, Vrintm, Vrintz features.
* config/arm/neon-docgen.ml (intrinsic_groups): Define Vrintn,
Vrinta, Vrintp, Vrintm, Vrintz, Vrintx.
* config/arm/neon-testgen.ml (effective_target): Define check for
Requires_arch 8.
* config/arm/neon-gen.ml
(print_feature_test_start): Handle Requires_arch.
(print_feature_test_end): Likewise.
Add 2012 to Copyright notice.
* doc/arm-neon-intrinsics.texi: Regenerate.
* config/arm/arm_neon.h: Regenerate.

gcc/testsuite/ChangeLog

2012-12-10  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>

* gcc.target/arm/neon/vrndaf32.c: New test.
* gcc.target/arm/neon/vrndqaf32.c: Likewise.
* gcc.target/arm/neon/vrndf32.c: Likewise.
* gcc.target/arm/neon/vrndqf32.c: Likewise.
* gcc.target/arm/neon/vrndmf32.c: Likewise.
* gcc.target/arm/neon/vrndqmf32.c: Likewise.
* gcc.target/arm/neon/vrndnf32.c: Likewise.
* gcc.target/arm/neon/vrndqnf32.c: Likewise.
* gcc.target/arm/neon/vrndpf32.c: Likewise.
* gcc.target/arm/neon/vrndqpf32.c: Likewise.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@194353 138bc75d-0d04-0410-961f-82ee72b054a4
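
For illustration only (not part of the committed sources): a minimal sketch of how the new ARMv8 NEON rounding intrinsics introduced by this change might be called. It assumes a compiler and target with ARMv8-A Advanced SIMD support (e.g. -march=armv8-a -mfpu=neon-fp-armv8 -mfloat-abi=hard); the helper round_demo is hypothetical.

/* Hypothetical example; assumes an ARMv8-A NEON target so that
   __ARM_ARCH >= 8 holds and the intrinsics below are visible.  */
#include "arm_neon.h"

#if __ARM_ARCH >= 8
float32x4_t
round_demo (float32x4_t x)
{
  float32x4_t n = vrndqn_f32 (x); /* To nearest, ties to even (vrintn.f32).  */
  float32x4_t a = vrndqa_f32 (x); /* To nearest, ties away from zero (vrinta.f32).  */
  float32x4_t p = vrndqp_f32 (x); /* Towards +Inf (vrintp.f32).  */
  float32x4_t m = vrndqm_f32 (x); /* Towards -Inf (vrintm.f32).  */
  float32x4_t z = vrndq_f32 (x);  /* Towards zero (vrintz.f32).  */
  return vaddq_f32 (vaddq_f32 (n, a), vaddq_f32 (p, vaddq_f32 (m, z)));
}
#endif

Each call is expected to map to the corresponding vrint*.f32 instruction on the Q registers; the D-register variants (vrndn_f32, vrnda_f32, vrndp_f32, vrndm_f32, vrnd_f32) work analogously, as documented in the regenerated arm-neon-intrinsics.texi below.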

18 files changed:
gcc/ChangeLog
gcc/config/arm/arm_neon.h
gcc/config/arm/neon-docgen.ml
gcc/config/arm/neon-gen.ml
gcc/config/arm/neon-testgen.ml
gcc/config/arm/neon.ml
gcc/doc/arm-neon-intrinsics.texi
gcc/testsuite/ChangeLog
gcc/testsuite/gcc.target/arm/neon/vrndaf32.c [new file with mode: 0644]
gcc/testsuite/gcc.target/arm/neon/vrndf32.c [new file with mode: 0644]
gcc/testsuite/gcc.target/arm/neon/vrndmf32.c [new file with mode: 0644]
gcc/testsuite/gcc.target/arm/neon/vrndnf32.c [new file with mode: 0644]
gcc/testsuite/gcc.target/arm/neon/vrndpf32.c [new file with mode: 0644]
gcc/testsuite/gcc.target/arm/neon/vrndqaf32.c [new file with mode: 0644]
gcc/testsuite/gcc.target/arm/neon/vrndqf32.c [new file with mode: 0644]
gcc/testsuite/gcc.target/arm/neon/vrndqmf32.c [new file with mode: 0644]
gcc/testsuite/gcc.target/arm/neon/vrndqnf32.c [new file with mode: 0644]
gcc/testsuite/gcc.target/arm/neon/vrndqpf32.c [new file with mode: 0644]

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index b5c23a7..249004e 100644
@@ -1,3 +1,20 @@
+2012-12-10  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
+
+       * config/arm/neon.ml (opcode): Add Vrintn, Vrinta, Vrintp, Vrintm,
+       Vrintz to type.
+       (type features): Add Requires_arch type constructor.
+       (ops): Define Vrintn, Vrinta, Vrintp, Vrintm, Vrintz features.
+       * config/arm/neon-docgen.ml (intrinsic_groups): Define Vrintn,
+       Vrinta, Vrintp, Vrintm, Vrintz, Vrintx.
+       * config/arm/neon-testgen.ml (effective_target): Define check for 
+       Requires_arch 8.
+       * config/arm/neon-gen.ml 
+       (print_feature_test_start): Handle Requires_arch.
+       (print_feature_test_end): Likewise.
+       Add 2012 to Copyright notice.
+       * doc/arm-neon-intrinsics.texi: Regenerate.
+       * config/arm/arm_neon.h: Regenerate.
+
 2012-12-10 Kai Tietz  <ktietz@redhat.com>
 
        * stmt.c (expand_sjlj_dispatch_table): Fix off by one.
diff --git a/gcc/config/arm/arm_neon.h b/gcc/config/arm/arm_neon.h
index 8fec83f..a79c922 100644
@@ -1,7 +1,7 @@
 /* ARM NEON intrinsics include file. This file is generated automatically
    using neon-gen.ml.  Please do not edit manually.
 
-   Copyright (C) 2006, 2007, 2009 Free Software Foundation, Inc.
+   Copyright (C) 2006, 2007, 2009, 2012 Free Software Foundation, Inc.
    Contributed by CodeSourcery.
 
    This file is part of GCC.
@@ -1382,6 +1382,86 @@ vfmsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
 }
 
 #endif
+#if __ARM_ARCH >= 8
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrndn_f32 (float32x2_t __a)
+{
+  return (float32x2_t)__builtin_neon_vrintnv2sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndqn_f32 (float32x4_t __a)
+{
+  return (float32x4_t)__builtin_neon_vrintnv4sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrnda_f32 (float32x2_t __a)
+{
+  return (float32x2_t)__builtin_neon_vrintav2sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndqa_f32 (float32x4_t __a)
+{
+  return (float32x4_t)__builtin_neon_vrintav4sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrndp_f32 (float32x2_t __a)
+{
+  return (float32x2_t)__builtin_neon_vrintpv2sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndqp_f32 (float32x4_t __a)
+{
+  return (float32x4_t)__builtin_neon_vrintpv4sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrndm_f32 (float32x2_t __a)
+{
+  return (float32x2_t)__builtin_neon_vrintmv2sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndqm_f32 (float32x4_t __a)
+{
+  return (float32x4_t)__builtin_neon_vrintmv4sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrnd_f32 (float32x2_t __a)
+{
+  return (float32x2_t)__builtin_neon_vrintzv2sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndq_f32 (float32x4_t __a)
+{
+  return (float32x4_t)__builtin_neon_vrintzv4sf (__a);
+}
+
+#endif
 __extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
 vsub_s8 (int8x8_t __a, int8x8_t __b)
 {
diff --git a/gcc/config/arm/neon-docgen.ml b/gcc/config/arm/neon-docgen.ml
index 043b1e0..228de16 100644
@@ -105,6 +105,11 @@ let intrinsic_groups =
     "Multiply-subtract", single_opcode Vmls;
     "Fused-multiply-accumulate", single_opcode Vfma;
     "Fused-multiply-subtract", single_opcode Vfms;
+    "Round to integral (to nearest, ties to even)", single_opcode Vrintn;
+    "Round to integral (to nearest, ties away from zero)", single_opcode Vrinta;
+    "Round to integral (towards +Inf)", single_opcode Vrintp;
+    "Round to integral (towards -Inf)", single_opcode Vrintm;
+    "Round to integral (towards 0)", single_opcode Vrintz;
     "Subtraction", single_opcode Vsub;
     "Comparison (equal-to)", single_opcode Vceq;
     "Comparison (greater-than-or-equal-to)", single_opcode Vcge;
diff --git a/gcc/config/arm/neon-gen.ml b/gcc/config/arm/neon-gen.ml
index 6c4e272..52ecb16 100644
@@ -290,17 +290,21 @@ let print_feature_test_start features =
   try
     match List.find (fun feature ->
                        match feature with Requires_feature _ -> true
+                                        | Requires_arch _ -> true
                                         | _ -> false)
                      features with
       Requires_feature feature -> 
         Format.printf "#ifdef __ARM_FEATURE_%s@\n" feature
+    | Requires_arch arch ->
+        Format.printf "#if __ARM_ARCH >= %d@\n" arch
     | _ -> assert false
   with Not_found -> assert true
 
 let print_feature_test_end features =
   let feature =
     List.exists (function Requires_feature x -> true
-                                        |  _ -> false) features in
+                          | Requires_arch x -> true
+                          |  _ -> false) features in
   if feature then Format.printf "#endif@\n"
 
 
@@ -437,7 +441,7 @@ let _ =
 "/* ARM NEON intrinsics include file. This file is generated automatically";
 "   using neon-gen.ml.  Please do not edit manually.";
 "";
-"   Copyright (C) 2006, 2007, 2009 Free Software Foundation, Inc.";
+"   Copyright (C) 2006, 2007, 2009, 2012 Free Software Foundation, Inc.";
 "   Contributed by CodeSourcery.";
 "";
 "   This file is part of GCC.";
diff --git a/gcc/config/arm/neon-testgen.ml b/gcc/config/arm/neon-testgen.ml
index 4645f39..f6c8d9a 100644
@@ -162,9 +162,11 @@ let effective_target features =
   try
     match List.find (fun feature ->
                        match feature with Requires_feature _ -> true
+                                        | Requires_arch _ -> true
                                         | _ -> false)
                      features with
       Requires_feature "FMA" -> "arm_neonv2"
+    | Requires_arch 8 -> "arm_v8_neon"
     | _ -> assert false
   with Not_found -> "arm_neon"
 
diff --git a/gcc/config/arm/neon.ml b/gcc/config/arm/neon.ml
index 101f8f6..5a5819f 100644
@@ -152,6 +152,11 @@ type opcode =
   | Vqdmulh_n
   | Vqdmulh_lane
   (* Unary ops.  *)
+  | Vrintn
+  | Vrinta
+  | Vrintp
+  | Vrintm
+  | Vrintz
   | Vabs
   | Vneg
   | Vcls
@@ -279,6 +284,7 @@ type features =
   | Fixed_core_reg
     (* Mark that the intrinsic requires __ARM_FEATURE_string to be defined.  *)
   | Requires_feature of string
+  | Requires_arch of int
 
 exception MixedMode of elts * elts
 
@@ -812,6 +818,27 @@ let ops =
     Vfms, [Requires_feature "FMA"], All (3, Dreg), "vfms", elts_same_io, [F32];
     Vfms, [Requires_feature "FMA"], All (3, Qreg), "vfmsQ", elts_same_io, [F32];
 
+    (* Round to integral. *)
+    Vrintn, [Builtin_name "vrintn"; Requires_arch 8], Use_operands [| Dreg; Dreg |],
+            "vrndn", elts_same_1, [F32];
+    Vrintn, [Builtin_name "vrintn"; Requires_arch 8], Use_operands [| Qreg; Qreg |],
+            "vrndqn", elts_same_1, [F32];
+    Vrinta, [Builtin_name "vrinta"; Requires_arch 8], Use_operands [| Dreg; Dreg |],
+            "vrnda", elts_same_1, [F32];
+    Vrinta, [Builtin_name "vrinta"; Requires_arch 8], Use_operands [| Qreg; Qreg |],
+            "vrndqa", elts_same_1, [F32];
+    Vrintp, [Builtin_name "vrintp"; Requires_arch 8], Use_operands [| Dreg; Dreg |],
+            "vrndp", elts_same_1, [F32];
+    Vrintp, [Builtin_name "vrintp"; Requires_arch 8], Use_operands [| Qreg; Qreg |],
+            "vrndqp", elts_same_1, [F32];
+    Vrintm, [Builtin_name "vrintm"; Requires_arch 8], Use_operands [| Dreg; Dreg |],
+            "vrndm", elts_same_1, [F32];
+    Vrintm, [Builtin_name "vrintm"; Requires_arch 8], Use_operands [| Qreg; Qreg |],
+            "vrndqm", elts_same_1, [F32];
+    Vrintz, [Builtin_name "vrintz"; Requires_arch 8], Use_operands [| Dreg; Dreg |],
+            "vrnd", elts_same_1, [F32];
+    Vrintz, [Builtin_name "vrintz"; Requires_arch 8], Use_operands [| Qreg; Qreg |],
+            "vrndq", elts_same_1, [F32];
     (* Subtraction.  *)
     Vsub, [], All (3, Dreg), "vsub", sign_invar_2, F32 :: su_8_32;
     Vsub, [No_op], All (3, Dreg), "vsub", sign_invar_2,  [S64; U64];
diff --git a/gcc/doc/arm-neon-intrinsics.texi b/gcc/doc/arm-neon-intrinsics.texi
index 14e6264..4b0289a 100644
 
 
 
+@subsubsection Round to integral (to nearest, ties to even)
+
+@itemize @bullet
+@item float32x2_t vrndn_f32 (float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrintn.f32 @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item float32x4_t vrndqn_f32 (float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrintn.f32 @var{q0}, @var{q0}}
+@end itemize
+
+
+
+
+@subsubsection Round to integral (to nearest, ties away from zero)
+
+@itemize @bullet
+@item float32x2_t vrnda_f32 (float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrinta.f32 @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item float32x4_t vrndqa_f32 (float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrinta.f32 @var{q0}, @var{q0}}
+@end itemize
+
+
+
+
+@subsubsection Round to integral (towards +Inf)
+
+@itemize @bullet
+@item float32x2_t vrndp_f32 (float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrintp.f32 @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item float32x4_t vrndqp_f32 (float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrintp.f32 @var{q0}, @var{q0}}
+@end itemize
+
+
+
+
+@subsubsection Round to integral (towards -Inf)
+
+@itemize @bullet
+@item float32x2_t vrndm_f32 (float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrintm.f32 @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item float32x4_t vrndqm_f32 (float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrintm.f32 @var{q0}, @var{q0}}
+@end itemize
+
+
+
+
+@subsubsection Round to integral (towards 0)
+
+@itemize @bullet
+@item float32x2_t vrnd_f32 (float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrintz.f32 @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item float32x4_t vrndq_f32 (float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrintz.f32 @var{q0}, @var{q0}}
+@end itemize
+
+
+
+
 @subsubsection Subtraction
 
 @itemize @bullet
 @subsubsection Transpose elements
 
 @itemize @bullet
-@item uint32x2x2_t vtrn_u32 (uint32x2_t, uint32x2_t)
-@*@emph{Form of expected instruction(s):} @code{vtrn.32 @var{d0}, @var{d1}}
-@end itemize
-
-
-@itemize @bullet
 @item uint16x4x2_t vtrn_u16 (uint16x4_t, uint16x4_t)
 @*@emph{Form of expected instruction(s):} @code{vtrn.16 @var{d0}, @var{d1}}
 @end itemize
 
 
 @itemize @bullet
-@item int32x2x2_t vtrn_s32 (int32x2_t, int32x2_t)
-@*@emph{Form of expected instruction(s):} @code{vtrn.32 @var{d0}, @var{d1}}
+@item int16x4x2_t vtrn_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vtrn.16 @var{d0}, @var{d1}}
 @end itemize
 
 
 @itemize @bullet
-@item int16x4x2_t vtrn_s16 (int16x4_t, int16x4_t)
+@item int8x8x2_t vtrn_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtrn.8 @var{d0}, @var{d1}}
+@end itemize
+
+
+@itemize @bullet
+@item poly16x4x2_t vtrn_p16 (poly16x4_t, poly16x4_t)
 @*@emph{Form of expected instruction(s):} @code{vtrn.16 @var{d0}, @var{d1}}
 @end itemize
 
 
 @itemize @bullet
-@item int8x8x2_t vtrn_s8 (int8x8_t, int8x8_t)
+@item poly8x8x2_t vtrn_p8 (poly8x8_t, poly8x8_t)
 @*@emph{Form of expected instruction(s):} @code{vtrn.8 @var{d0}, @var{d1}}
 @end itemize
 
 
 @itemize @bullet
 @item float32x2x2_t vtrn_f32 (float32x2_t, float32x2_t)
-@*@emph{Form of expected instruction(s):} @code{vtrn.32 @var{d0}, @var{d1}}
+@*@emph{Form of expected instruction(s):} @code{vuzp.32 @var{d0}, @var{d1}}
 @end itemize
 
 
 @itemize @bullet
-@item poly16x4x2_t vtrn_p16 (poly16x4_t, poly16x4_t)
-@*@emph{Form of expected instruction(s):} @code{vtrn.16 @var{d0}, @var{d1}}
+@item uint32x2x2_t vtrn_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.32 @var{d0}, @var{d1}}
 @end itemize
 
 
 @itemize @bullet
-@item poly8x8x2_t vtrn_p8 (poly8x8_t, poly8x8_t)
-@*@emph{Form of expected instruction(s):} @code{vtrn.8 @var{d0}, @var{d1}}
+@item int32x2x2_t vtrn_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.32 @var{d0}, @var{d1}}
 @end itemize
 
 
 @subsubsection Zip elements
 
 @itemize @bullet
-@item uint32x2x2_t vzip_u32 (uint32x2_t, uint32x2_t)
-@*@emph{Form of expected instruction(s):} @code{vzip.32 @var{d0}, @var{d1}}
-@end itemize
-
-
-@itemize @bullet
 @item uint16x4x2_t vzip_u16 (uint16x4_t, uint16x4_t)
 @*@emph{Form of expected instruction(s):} @code{vzip.16 @var{d0}, @var{d1}}
 @end itemize
 
 
 @itemize @bullet
-@item int32x2x2_t vzip_s32 (int32x2_t, int32x2_t)
-@*@emph{Form of expected instruction(s):} @code{vzip.32 @var{d0}, @var{d1}}
+@item int16x4x2_t vzip_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vzip.16 @var{d0}, @var{d1}}
 @end itemize
 
 
 @itemize @bullet
-@item int16x4x2_t vzip_s16 (int16x4_t, int16x4_t)
+@item int8x8x2_t vzip_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vzip.8 @var{d0}, @var{d1}}
+@end itemize
+
+
+@itemize @bullet
+@item poly16x4x2_t vzip_p16 (poly16x4_t, poly16x4_t)
 @*@emph{Form of expected instruction(s):} @code{vzip.16 @var{d0}, @var{d1}}
 @end itemize
 
 
 @itemize @bullet
-@item int8x8x2_t vzip_s8 (int8x8_t, int8x8_t)
+@item poly8x8x2_t vzip_p8 (poly8x8_t, poly8x8_t)
 @*@emph{Form of expected instruction(s):} @code{vzip.8 @var{d0}, @var{d1}}
 @end itemize
 
 
 @itemize @bullet
 @item float32x2x2_t vzip_f32 (float32x2_t, float32x2_t)
-@*@emph{Form of expected instruction(s):} @code{vzip.32 @var{d0}, @var{d1}}
+@*@emph{Form of expected instruction(s):} @code{vuzp.32 @var{d0}, @var{d1}}
 @end itemize
 
 
 @itemize @bullet
-@item poly16x4x2_t vzip_p16 (poly16x4_t, poly16x4_t)
-@*@emph{Form of expected instruction(s):} @code{vzip.16 @var{d0}, @var{d1}}
+@item uint32x2x2_t vzip_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.32 @var{d0}, @var{d1}}
 @end itemize
 
 
 @itemize @bullet
-@item poly8x8x2_t vzip_p8 (poly8x8_t, poly8x8_t)
-@*@emph{Form of expected instruction(s):} @code{vzip.8 @var{d0}, @var{d1}}
+@item int32x2x2_t vzip_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.32 @var{d0}, @var{d1}}
 @end itemize
 
 
 
 @itemize @bullet
 @item uint64x2_t vld1q_dup_u64 (const uint64_t *)
-@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
 @end itemize
 
 
 @itemize @bullet
 @item int64x2_t vld1q_dup_s64 (const int64_t *)
-@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
 @end itemize
 
 
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index bdeb181..57caf46 100644
@@ -1,3 +1,16 @@
+2012-12-10  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
+
+       * gcc.target/arm/neon/vrndaf32.c: New test.
+       * gcc.target/arm/neon/vrndqaf32.c: Likewise.
+       * gcc.target/arm/neon/vrndf32.c: Likewise.
+       * gcc.target/arm/neon/vrndqf32.c: Likewise.
+       * gcc.target/arm/neon/vrndmf32.c: Likewise.
+       * gcc.target/arm/neon/vrndqmf32.c: Likewise.
+       * gcc.target/arm/neon/vrndnf32.c: Likewise.
+       * gcc.target/arm/neon/vrndqnf32.c: Likewise.
+       * gcc.target/arm/neon/vrndpf32.c: Likewise.
+       * gcc.target/arm/neon/vrndqpf32.c: Likewise.
+
 2012-12-09  John David Anglin  <dave.anglin@nrc-cnrc.gc.ca>
 
        * gcc.misc-tests/gcov-12.c: Fix dg order.
diff --git a/gcc/testsuite/gcc.target/arm/neon/vrndaf32.c b/gcc/testsuite/gcc.target/arm/neon/vrndaf32.c
new file mode 100644
index 0000000..02ca465
--- /dev/null
@@ -0,0 +1,20 @@
+/* Test the `vrndaf32' ARM Neon intrinsic.  */
+/* This file was autogenerated by neon-testgen.  */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_v8_neon_ok } */
+/* { dg-options "-save-temps -O0" } */
+/* { dg-add-options arm_v8_neon } */
+
+#include "arm_neon.h"
+
+void test_vrndaf32 (void)
+{
+  float32x2_t out_float32x2_t;
+  float32x2_t arg0_float32x2_t;
+
+  out_float32x2_t = vrnda_f32 (arg0_float32x2_t);
+}
+
+/* { dg-final { scan-assembler "vrinta\.f32\[  \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[         \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/neon/vrndf32.c b/gcc/testsuite/gcc.target/arm/neon/vrndf32.c
new file mode 100644
index 0000000..b941657
--- /dev/null
@@ -0,0 +1,20 @@
+/* Test the `vrndf32' ARM Neon intrinsic.  */
+/* This file was autogenerated by neon-testgen.  */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_v8_neon_ok } */
+/* { dg-options "-save-temps -O0" } */
+/* { dg-add-options arm_v8_neon } */
+
+#include "arm_neon.h"
+
+void test_vrndf32 (void)
+{
+  float32x2_t out_float32x2_t;
+  float32x2_t arg0_float32x2_t;
+
+  out_float32x2_t = vrnd_f32 (arg0_float32x2_t);
+}
+
+/* { dg-final { scan-assembler "vrintz\.f32\[  \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[         \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/neon/vrndmf32.c b/gcc/testsuite/gcc.target/arm/neon/vrndmf32.c
new file mode 100644
index 0000000..7f4e90b
--- /dev/null
@@ -0,0 +1,20 @@
+/* Test the `vrndmf32' ARM Neon intrinsic.  */
+/* This file was autogenerated by neon-testgen.  */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_v8_neon_ok } */
+/* { dg-options "-save-temps -O0" } */
+/* { dg-add-options arm_v8_neon } */
+
+#include "arm_neon.h"
+
+void test_vrndmf32 (void)
+{
+  float32x2_t out_float32x2_t;
+  float32x2_t arg0_float32x2_t;
+
+  out_float32x2_t = vrndm_f32 (arg0_float32x2_t);
+}
+
+/* { dg-final { scan-assembler "vrintm\.f32\[  \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[         \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/neon/vrndnf32.c b/gcc/testsuite/gcc.target/arm/neon/vrndnf32.c
new file mode 100644
index 0000000..df8e3e9
--- /dev/null
@@ -0,0 +1,20 @@
+/* Test the `vrndnf32' ARM Neon intrinsic.  */
+/* This file was autogenerated by neon-testgen.  */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_v8_neon_ok } */
+/* { dg-options "-save-temps -O0" } */
+/* { dg-add-options arm_v8_neon } */
+
+#include "arm_neon.h"
+
+void test_vrndnf32 (void)
+{
+  float32x2_t out_float32x2_t;
+  float32x2_t arg0_float32x2_t;
+
+  out_float32x2_t = vrndn_f32 (arg0_float32x2_t);
+}
+
+/* { dg-final { scan-assembler "vrintn\.f32\[  \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[         \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/neon/vrndpf32.c b/gcc/testsuite/gcc.target/arm/neon/vrndpf32.c
new file mode 100644
index 0000000..d3900cd
--- /dev/null
@@ -0,0 +1,20 @@
+/* Test the `vrndpf32' ARM Neon intrinsic.  */
+/* This file was autogenerated by neon-testgen.  */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_v8_neon_ok } */
+/* { dg-options "-save-temps -O0" } */
+/* { dg-add-options arm_v8_neon } */
+
+#include "arm_neon.h"
+
+void test_vrndpf32 (void)
+{
+  float32x2_t out_float32x2_t;
+  float32x2_t arg0_float32x2_t;
+
+  out_float32x2_t = vrndp_f32 (arg0_float32x2_t);
+}
+
+/* { dg-final { scan-assembler "vrintp\.f32\[  \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[         \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/neon/vrndqaf32.c b/gcc/testsuite/gcc.target/arm/neon/vrndqaf32.c
new file mode 100644
index 0000000..b7b5d73
--- /dev/null
@@ -0,0 +1,20 @@
+/* Test the `vrndqaf32' ARM Neon intrinsic.  */
+/* This file was autogenerated by neon-testgen.  */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_v8_neon_ok } */
+/* { dg-options "-save-temps -O0" } */
+/* { dg-add-options arm_v8_neon } */
+
+#include "arm_neon.h"
+
+void test_vrndqaf32 (void)
+{
+  float32x4_t out_float32x4_t;
+  float32x4_t arg0_float32x4_t;
+
+  out_float32x4_t = vrndqa_f32 (arg0_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "vrinta\.f32\[  \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[         \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/neon/vrndqf32.c b/gcc/testsuite/gcc.target/arm/neon/vrndqf32.c
new file mode 100644
index 0000000..08b4b45
--- /dev/null
@@ -0,0 +1,20 @@
+/* Test the `vrndqf32' ARM Neon intrinsic.  */
+/* This file was autogenerated by neon-testgen.  */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_v8_neon_ok } */
+/* { dg-options "-save-temps -O0" } */
+/* { dg-add-options arm_v8_neon } */
+
+#include "arm_neon.h"
+
+void test_vrndqf32 (void)
+{
+  float32x4_t out_float32x4_t;
+  float32x4_t arg0_float32x4_t;
+
+  out_float32x4_t = vrndq_f32 (arg0_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "vrintz\.f32\[  \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[         \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/neon/vrndqmf32.c b/gcc/testsuite/gcc.target/arm/neon/vrndqmf32.c
new file mode 100644
index 0000000..6d16bfc
--- /dev/null
@@ -0,0 +1,20 @@
+/* Test the `vrndqmf32' ARM Neon intrinsic.  */
+/* This file was autogenerated by neon-testgen.  */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_v8_neon_ok } */
+/* { dg-options "-save-temps -O0" } */
+/* { dg-add-options arm_v8_neon } */
+
+#include "arm_neon.h"
+
+void test_vrndqmf32 (void)
+{
+  float32x4_t out_float32x4_t;
+  float32x4_t arg0_float32x4_t;
+
+  out_float32x4_t = vrndqm_f32 (arg0_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "vrintm\.f32\[  \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[         \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/neon/vrndqnf32.c b/gcc/testsuite/gcc.target/arm/neon/vrndqnf32.c
new file mode 100644
index 0000000..b31ca95
--- /dev/null
@@ -0,0 +1,20 @@
+/* Test the `vrndqnf32' ARM Neon intrinsic.  */
+/* This file was autogenerated by neon-testgen.  */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_v8_neon_ok } */
+/* { dg-options "-save-temps -O0" } */
+/* { dg-add-options arm_v8_neon } */
+
+#include "arm_neon.h"
+
+void test_vrndqnf32 (void)
+{
+  float32x4_t out_float32x4_t;
+  float32x4_t arg0_float32x4_t;
+
+  out_float32x4_t = vrndqn_f32 (arg0_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "vrintn\.f32\[  \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[         \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/neon/vrndqpf32.c b/gcc/testsuite/gcc.target/arm/neon/vrndqpf32.c
new file mode 100644
index 0000000..5c4a866
--- /dev/null
@@ -0,0 +1,20 @@
+/* Test the `vrndqpf32' ARM Neon intrinsic.  */
+/* This file was autogenerated by neon-testgen.  */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_v8_neon_ok } */
+/* { dg-options "-save-temps -O0" } */
+/* { dg-add-options arm_v8_neon } */
+
+#include "arm_neon.h"
+
+void test_vrndqpf32 (void)
+{
+  float32x4_t out_float32x4_t;
+  float32x4_t arg0_float32x4_t;
+
+  out_float32x4_t = vrndqp_f32 (arg0_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "vrintp\.f32\[  \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[         \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */