AArch64 [3/10]
author	mshawcroft <mshawcroft@138bc75d-0d04-0410-961f-82ee72b054a4>
	Tue, 23 Oct 2012 17:02:30 +0000 (17:02 +0000)
committer	mshawcroft <mshawcroft@138bc75d-0d04-0410-961f-82ee72b054a4>
	Tue, 23 Oct 2012 17:02:30 +0000 (17:02 +0000)
2012-10-23  Ian Bolton  <ian.bolton@arm.com>
    James Greenhalgh  <james.greenhalgh@arm.com>
    Jim MacArthur  <jim.macarthur@arm.com>
    Chris Schlumberger-Socha <chris.schlumberger-socha@arm.com>
    Marcus Shawcroft  <marcus.shawcroft@arm.com>
    Nigel Stephens  <nigel.stephens@arm.com>
    Ramana Radhakrishnan  <ramana.radhakrishnan@arm.com>
    Richard Earnshaw  <rearnsha@arm.com>
    Sofiane Naci  <sofiane.naci@arm.com>
    Stephen Thomas  <stephen.thomas@arm.com>
    Tejas Belagod  <tejas.belagod@arm.com>
    Yufeng Zhang  <yufeng.zhang@arm.com>

* common/config/aarch64/aarch64-common.c: New file.
* config/aarch64/aarch64-arches.def: New file.
* config/aarch64/aarch64-builtins.c: New file.
* config/aarch64/aarch64-cores.def: New file.
* config/aarch64/aarch64-elf-raw.h: New file.
* config/aarch64/aarch64-elf.h: New file.
* config/aarch64/aarch64-generic.md: New file.
* config/aarch64/aarch64-linux.h: New file.
* config/aarch64/aarch64-modes.def: New file.
* config/aarch64/aarch64-option-extensions.def: New file.
* config/aarch64/aarch64-opts.h: New file.
* config/aarch64/aarch64-protos.h: New file.
* config/aarch64/aarch64-simd.md: New file.
* config/aarch64/aarch64-tune.md: New file.
* config/aarch64/aarch64.c: New file.
* config/aarch64/aarch64.h: New file.
* config/aarch64/aarch64.md: New file.
* config/aarch64/aarch64.opt: New file.
* config/aarch64/arm_neon.h: New file.
* config/aarch64/constraints.md: New file.
* config/aarch64/gentune.sh: New file.
* config/aarch64/iterators.md: New file.
* config/aarch64/large.md: New file.
* config/aarch64/predicates.md: New file.
* config/aarch64/small.md: New file.
* config/aarch64/sync.md: New file.
* config/aarch64/t-aarch64-linux: New file.
* config/aarch64/t-aarch64: New file.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@192723 138bc75d-0d04-0410-961f-82ee72b054a4

29 files changed:
gcc/ChangeLog
gcc/common/config/aarch64/aarch64-common.c [new file with mode: 0644]
gcc/config/aarch64/aarch64-arches.def [new file with mode: 0644]
gcc/config/aarch64/aarch64-builtins.c [new file with mode: 0644]
gcc/config/aarch64/aarch64-cores.def [new file with mode: 0644]
gcc/config/aarch64/aarch64-elf-raw.h [new file with mode: 0644]
gcc/config/aarch64/aarch64-elf.h [new file with mode: 0644]
gcc/config/aarch64/aarch64-generic.md [new file with mode: 0644]
gcc/config/aarch64/aarch64-linux.h [new file with mode: 0644]
gcc/config/aarch64/aarch64-modes.def [new file with mode: 0644]
gcc/config/aarch64/aarch64-option-extensions.def [new file with mode: 0644]
gcc/config/aarch64/aarch64-opts.h [new file with mode: 0644]
gcc/config/aarch64/aarch64-protos.h [new file with mode: 0644]
gcc/config/aarch64/aarch64-simd.md [new file with mode: 0644]
gcc/config/aarch64/aarch64-tune.md [new file with mode: 0644]
gcc/config/aarch64/aarch64.c [new file with mode: 0644]
gcc/config/aarch64/aarch64.h [new file with mode: 0644]
gcc/config/aarch64/aarch64.md [new file with mode: 0644]
gcc/config/aarch64/aarch64.opt [new file with mode: 0644]
gcc/config/aarch64/arm_neon.h [new file with mode: 0644]
gcc/config/aarch64/constraints.md [new file with mode: 0644]
gcc/config/aarch64/gentune.sh [new file with mode: 0644]
gcc/config/aarch64/iterators.md [new file with mode: 0644]
gcc/config/aarch64/large.md [new file with mode: 0644]
gcc/config/aarch64/predicates.md [new file with mode: 0644]
gcc/config/aarch64/small.md [new file with mode: 0644]
gcc/config/aarch64/sync.md [new file with mode: 0644]
gcc/config/aarch64/t-aarch64 [new file with mode: 0644]
gcc/config/aarch64/t-aarch64-linux [new file with mode: 0644]

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index aa619b7..ee8f79f 100644
@@ -1,3 +1,45 @@
+2012-10-23  Ian Bolton  <ian.bolton@arm.com>
+           James Greenhalgh  <james.greenhalgh@arm.com>
+           Jim MacArthur  <jim.macarthur@arm.com>
+           Chris Schlumberger-Socha <chris.schlumberger-socha@arm.com>
+           Marcus Shawcroft  <marcus.shawcroft@arm.com>
+           Nigel Stephens  <nigel.stephens@arm.com>
+           Ramana Radhakrishnan  <ramana.radhakrishnan@arm.com>
+           Richard Earnshaw  <rearnsha@arm.com>
+           Sofiane Naci  <sofiane.naci@arm.com>
+           Stephen Thomas  <stephen.thomas@arm.com>
+           Tejas Belagod  <tejas.belagod@arm.com>
+           Yufeng Zhang  <yufeng.zhang@arm.com>
+
+       * common/config/aarch64/aarch64-common.c: New file.
+       * config/aarch64/aarch64-arches.def: New file.
+       * config/aarch64/aarch64-builtins.c: New file.
+       * config/aarch64/aarch64-cores.def: New file.
+       * config/aarch64/aarch64-elf-raw.h: New file.
+       * config/aarch64/aarch64-elf.h: New file.
+       * config/aarch64/aarch64-generic.md: New file.
+       * config/aarch64/aarch64-linux.h: New file.
+       * config/aarch64/aarch64-modes.def: New file.
+       * config/aarch64/aarch64-option-extensions.def: New file.
+       * config/aarch64/aarch64-opts.h: New file.
+       * config/aarch64/aarch64-protos.h: New file.
+       * config/aarch64/aarch64-simd.md: New file.
+       * config/aarch64/aarch64-tune.md: New file.
+       * config/aarch64/aarch64.c: New file.
+       * config/aarch64/aarch64.h: New file.
+       * config/aarch64/aarch64.md: New file.
+       * config/aarch64/aarch64.opt: New file.
+       * config/aarch64/arm_neon.h: New file.
+       * config/aarch64/constraints.md: New file.
+       * config/aarch64/gentune.sh: New file.
+       * config/aarch64/iterators.md: New file.
+       * config/aarch64/large.md: New file.
+       * config/aarch64/predicates.md: New file.
+       * config/aarch64/small.md: New file.
+       * config/aarch64/sync.md: New file.
+       * config/aarch64/t-aarch64-linux: New file.
+       * config/aarch64/t-aarch64: New file.
+
 2012-10-23  Jakub Jelinek  <jakub@redhat.com>
 
        PR c++/54988
diff --git a/gcc/common/config/aarch64/aarch64-common.c b/gcc/common/config/aarch64/aarch64-common.c
new file mode 100644
index 0000000..bd249e1
--- /dev/null
@@ -0,0 +1,88 @@
+/* Common hooks for AArch64.
+   Copyright (C) 2012 Free Software Foundation, Inc.
+   Contributed by ARM Ltd.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published
+   by the Free Software Foundation; either version 3, or (at your
+   option) any later version.
+
+   GCC is distributed in the hope that it will be useful, but WITHOUT
+   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+   License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with GCC; see the file COPYING3.  If not see
+   <http://www.gnu.org/licenses/>.  */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tm_p.h"
+#include "common/common-target.h"
+#include "common/common-target-def.h"
+#include "opts.h"
+#include "flags.h"
+
+#ifdef  TARGET_BIG_ENDIAN_DEFAULT
+#undef  TARGET_DEFAULT_TARGET_FLAGS
+#define TARGET_DEFAULT_TARGET_FLAGS (MASK_BIG_END)
+#endif
+
+#undef  TARGET_HANDLE_OPTION
+#define TARGET_HANDLE_OPTION aarch64_handle_option
+
+#undef TARGET_OPTION_OPTIMIZATION_TABLE
+#define TARGET_OPTION_OPTIMIZATION_TABLE aarch_option_optimization_table
+
+/* Set default optimization options.  */
+static const struct default_options aarch_option_optimization_table[] =
+  {
+    /* Enable section anchors by default at -O1 or higher.  */
+    { OPT_LEVELS_1_PLUS, OPT_fsection_anchors, NULL, 1 },
+    { OPT_LEVELS_NONE, 0, NULL, 0 }
+  };
+
+/* Implement TARGET_HANDLE_OPTION.
+   This function handles the target-specific options for CPU/target selection.
+
+   -march wins over -mcpu, so when -march is defined, -mcpu takes the same
+   value, otherwise -march remains undefined.  -mtune can be used with
+   either -march or -mcpu.  If -march and -mcpu are given together, the
+   rightmost option wins.  */
+
+static bool
+aarch64_handle_option (struct gcc_options *opts,
+                      struct gcc_options *opts_set ATTRIBUTE_UNUSED,
+                      const struct cl_decoded_option *decoded,
+                      location_t loc ATTRIBUTE_UNUSED)
+{
+  size_t code = decoded->opt_index;
+  const char *arg = decoded->arg;
+
+  switch (code)
+    {
+    case OPT_march_:
+      opts->x_aarch64_arch_string = arg;
+      opts->x_aarch64_cpu_string = arg;
+      return true;
+
+    case OPT_mcpu_:
+      opts->x_aarch64_cpu_string = arg;
+      opts->x_aarch64_arch_string = NULL;
+      return true;
+
+    case OPT_mtune_:
+      opts->x_aarch64_tune_string = arg;
+      return true;
+
+    default:
+      return true;
+    }
+}
+
+struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER;
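Illustration (not part of the patch): the precedence described in the comment above can be checked with a minimal standalone harness that mirrors only the switch in aarch64_handle_option. The struct below is a hypothetical stand-in for the x_aarch64_*_string fields, and the sketch assumes decoded options reach the hook in left-to-right command-line order, as GCC's option machinery does.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the x_aarch64_*_string option fields.  */
struct opts { const char *arch, *cpu, *tune; };

/* Mirror of the switch in aarch64_handle_option above.  */
static void
handle (struct opts *o, const char *opt, const char *arg)
{
  if (strcmp (opt, "march") == 0)
    { o->arch = arg; o->cpu = arg; }	/* OPT_march_ */
  else if (strcmp (opt, "mcpu") == 0)
    { o->cpu = arg; o->arch = NULL; }	/* OPT_mcpu_ */
  else if (strcmp (opt, "mtune") == 0)
    o->tune = arg;			/* OPT_mtune_ */
}

int
main (void)
{
  struct opts o = { NULL, NULL, NULL };
  handle (&o, "march", "armv8-a");
  handle (&o, "mcpu", "generic");	/* rightmost of -march/-mcpu wins */
  printf ("arch=%s cpu=%s\n",
	  o.arch ? o.arch : "(unset)", o.cpu ? o.cpu : "(unset)");
  /* Prints: arch=(unset) cpu=generic */
  return 0;
}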
diff --git a/gcc/config/aarch64/aarch64-arches.def b/gcc/config/aarch64/aarch64-arches.def
new file mode 100644
index 0000000..3ac34ba
--- /dev/null
@@ -0,0 +1,29 @@
+/* Copyright (C) 2011, 2012 Free Software Foundation, Inc.
+   Contributed by ARM Ltd.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published
+   by the Free Software Foundation; either version 3, or (at your
+   option) any later version.
+
+   GCC is distributed in the hope that it will be useful, but WITHOUT
+   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+   License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with GCC; see the file COPYING3.  If not see
+   <http://www.gnu.org/licenses/>.  */
+
+/* Before using #include to read this file, define a macro:
+
+      AARCH64_ARCH(NAME, CORE, ARCH, FLAGS)
+
+   The NAME is the name of the architecture, represented as a string
+   constant.  The CORE is the identifier for a core representative of
+   this architecture.  ARCH is the architecture revision.  FLAGS are
+   the flags implied by the architecture.  */
+
+AARCH64_ARCH("armv8-a",              generic,       8,  AARCH64_FL_FOR_ARCH8)
diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
new file mode 100644
index 0000000..429a0df
--- /dev/null
@@ -0,0 +1,1320 @@
+/* Builtins' description for AArch64 SIMD architecture.
+   Copyright (C) 2011, 2012 Free Software Foundation, Inc.
+   Contributed by ARM Ltd.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with GCC; see the file COPYING3.  If not see
+   <http://www.gnu.org/licenses/>.  */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "tree.h"
+#include "expr.h"
+#include "tm_p.h"
+#include "recog.h"
+#include "langhooks.h"
+#include "diagnostic-core.h"
+#include "optabs.h"
+
+enum aarch64_simd_builtin_type_bits
+{
+  T_V8QI = 0x0001,
+  T_V4HI = 0x0002,
+  T_V2SI = 0x0004,
+  T_V2SF = 0x0008,
+  T_DI = 0x0010,
+  T_DF = 0x0020,
+  T_V16QI = 0x0040,
+  T_V8HI = 0x0080,
+  T_V4SI = 0x0100,
+  T_V4SF = 0x0200,
+  T_V2DI = 0x0400,
+  T_V2DF = 0x0800,
+  T_TI = 0x1000,
+  T_EI = 0x2000,
+  T_OI = 0x4000,
+  T_XI = 0x8000,
+  T_SI = 0x10000,
+  T_HI = 0x20000,
+  T_QI = 0x40000
+};
+
+#define v8qi_UP  T_V8QI
+#define v4hi_UP  T_V4HI
+#define v2si_UP  T_V2SI
+#define v2sf_UP  T_V2SF
+#define di_UP    T_DI
+#define df_UP    T_DF
+#define v16qi_UP T_V16QI
+#define v8hi_UP  T_V8HI
+#define v4si_UP  T_V4SI
+#define v4sf_UP  T_V4SF
+#define v2di_UP  T_V2DI
+#define v2df_UP  T_V2DF
+#define ti_UP   T_TI
+#define ei_UP   T_EI
+#define oi_UP   T_OI
+#define xi_UP   T_XI
+#define si_UP    T_SI
+#define hi_UP    T_HI
+#define qi_UP    T_QI
+
+#define UP(X) X##_UP
+
+#define T_MAX 19
+
+typedef enum
+{
+  AARCH64_SIMD_BINOP,
+  AARCH64_SIMD_TERNOP,
+  AARCH64_SIMD_QUADOP,
+  AARCH64_SIMD_UNOP,
+  AARCH64_SIMD_GETLANE,
+  AARCH64_SIMD_SETLANE,
+  AARCH64_SIMD_CREATE,
+  AARCH64_SIMD_DUP,
+  AARCH64_SIMD_DUPLANE,
+  AARCH64_SIMD_COMBINE,
+  AARCH64_SIMD_SPLIT,
+  AARCH64_SIMD_LANEMUL,
+  AARCH64_SIMD_LANEMULL,
+  AARCH64_SIMD_LANEMULH,
+  AARCH64_SIMD_LANEMAC,
+  AARCH64_SIMD_SCALARMUL,
+  AARCH64_SIMD_SCALARMULL,
+  AARCH64_SIMD_SCALARMULH,
+  AARCH64_SIMD_SCALARMAC,
+  AARCH64_SIMD_CONVERT,
+  AARCH64_SIMD_FIXCONV,
+  AARCH64_SIMD_SELECT,
+  AARCH64_SIMD_RESULTPAIR,
+  AARCH64_SIMD_REINTERP,
+  AARCH64_SIMD_VTBL,
+  AARCH64_SIMD_VTBX,
+  AARCH64_SIMD_LOAD1,
+  AARCH64_SIMD_LOAD1LANE,
+  AARCH64_SIMD_STORE1,
+  AARCH64_SIMD_STORE1LANE,
+  AARCH64_SIMD_LOADSTRUCT,
+  AARCH64_SIMD_LOADSTRUCTLANE,
+  AARCH64_SIMD_STORESTRUCT,
+  AARCH64_SIMD_STORESTRUCTLANE,
+  AARCH64_SIMD_LOGICBINOP,
+  AARCH64_SIMD_SHIFTINSERT,
+  AARCH64_SIMD_SHIFTIMM,
+  AARCH64_SIMD_SHIFTACC
+} aarch64_simd_itype;
+
+typedef struct
+{
+  const char *name;
+  const aarch64_simd_itype itype;
+  const int bits;
+  const enum insn_code codes[T_MAX];
+  const unsigned int num_vars;
+  unsigned int base_fcode;
+} aarch64_simd_builtin_datum;
+
+#define CF(N, X) CODE_FOR_aarch64_##N##X
+
+#define VAR1(T, N, A) \
+  #N, AARCH64_SIMD_##T, UP (A), { CF (N, A) }, 1, 0
+#define VAR2(T, N, A, B) \
+  #N, AARCH64_SIMD_##T, UP (A) | UP (B), { CF (N, A), CF (N, B) }, 2, 0
+#define VAR3(T, N, A, B, C) \
+  #N, AARCH64_SIMD_##T, UP (A) | UP (B) | UP (C), \
+  { CF (N, A), CF (N, B), CF (N, C) }, 3, 0
+#define VAR4(T, N, A, B, C, D) \
+  #N, AARCH64_SIMD_##T, UP (A) | UP (B) | UP (C) | UP (D), \
+  { CF (N, A), CF (N, B), CF (N, C), CF (N, D) }, 4, 0
+#define VAR5(T, N, A, B, C, D, E) \
+  #N, AARCH64_SIMD_##T, UP (A) | UP (B) | UP (C) | UP (D) | UP (E), \
+  { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E) }, 5, 0
+#define VAR6(T, N, A, B, C, D, E, F) \
+  #N, AARCH64_SIMD_##T, UP (A) | UP (B) | UP (C) | UP (D) | UP (E) | UP (F), \
+  { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F) }, 6, 0
+#define VAR7(T, N, A, B, C, D, E, F, G) \
+  #N, AARCH64_SIMD_##T, UP (A) | UP (B) | UP (C) | UP (D) \
+                       | UP (E) | UP (F) | UP (G), \
+  { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F), \
+    CF (N, G) }, 7, 0
+#define VAR8(T, N, A, B, C, D, E, F, G, H) \
+  #N, AARCH64_SIMD_##T, UP (A) | UP (B) | UP (C) | UP (D) \
+               | UP (E) | UP (F) | UP (G) \
+               | UP (H), \
+  { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F), \
+    CF (N, G), CF (N, H) }, 8, 0
+#define VAR9(T, N, A, B, C, D, E, F, G, H, I) \
+  #N, AARCH64_SIMD_##T, UP (A) | UP (B) | UP (C) | UP (D) \
+               | UP (E) | UP (F) | UP (G) \
+               | UP (H) | UP (I), \
+  { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F), \
+    CF (N, G), CF (N, H), CF (N, I) }, 9, 0
+#define VAR10(T, N, A, B, C, D, E, F, G, H, I, J) \
+  #N, AARCH64_SIMD_##T, UP (A) | UP (B) | UP (C) | UP (D) \
+               | UP (E) | UP (F) | UP (G) \
+               | UP (H) | UP (I) | UP (J), \
+  { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F), \
+    CF (N, G), CF (N, H), CF (N, I), CF (N, J) }, 10, 0
+
+#define VAR11(T, N, A, B, C, D, E, F, G, H, I, J, K) \
+  #N, AARCH64_SIMD_##T, UP (A) | UP (B) | UP (C) | UP (D) \
+               | UP (E) | UP (F) | UP (G) \
+               | UP (H) | UP (I) | UP (J) | UP (K), \
+  { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F), \
+    CF (N, G), CF (N, H), CF (N, I), CF (N, J), CF (N, K) }, 11, 0
+
+#define VAR12(T, N, A, B, C, D, E, F, G, H, I, J, K, L) \
+  #N, AARCH64_SIMD_##T, UP (A) | UP (B) | UP (C) | UP (D) \
+               | UP (E) | UP (F) | UP (G) \
+               | UP (H) | UP (I) | UP (J) | UP (K) | UP (L), \
+  { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F), \
+    CF (N, G), CF (N, H), CF (N, I), CF (N, J), CF (N, K), CF (N, L) }, 12, 0
+
+
+/* The mode entries in the following table correspond to the "key" type of the
+   instruction variant, i.e. equivalent to that which would be specified after
+   the assembler mnemonic, which usually refers to the last vector operand.
+   (Signed/unsigned/polynomial types are not differentiated, however, and are
+   all mapped onto the same mode for a given element size.)  The modes listed
+   per instruction should be the same as those defined for that instruction's
+   pattern in aarch64-simd.md.
+   WARNING: Variants should be listed in the same increasing order as
+   aarch64_simd_builtin_type_bits.  */
+
+static aarch64_simd_builtin_datum aarch64_simd_builtin_data[] = {
+  {VAR6 (CREATE, create, v8qi, v4hi, v2si, v2sf, di, df)},
+  {VAR6 (GETLANE, get_lane_signed,
+         v8qi, v4hi, v2si, v16qi, v8hi, v4si)},
+  {VAR7 (GETLANE, get_lane_unsigned,
+         v8qi, v4hi, v2si, v16qi, v8hi, v4si, v2di)},
+  {VAR4 (GETLANE, get_lane, v2sf, di, v4sf, v2df)},
+  {VAR6 (GETLANE, get_dregoi, v8qi, v4hi, v2si, v2sf, di, df)},
+  {VAR6 (GETLANE, get_qregoi, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+  {VAR6 (GETLANE, get_dregci, v8qi, v4hi, v2si, v2sf, di, df)},
+  {VAR6 (GETLANE, get_qregci, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+  {VAR6 (GETLANE, get_dregxi, v8qi, v4hi, v2si, v2sf, di, df)},
+  {VAR6 (GETLANE, get_qregxi, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+  {VAR6 (SETLANE, set_qregoi, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+  {VAR6 (SETLANE, set_qregci, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+  {VAR6 (SETLANE, set_qregxi, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+
+  {VAR5 (REINTERP, reinterpretv8qi, v8qi, v4hi, v2si, v2sf, di)},
+  {VAR5 (REINTERP, reinterpretv4hi, v8qi, v4hi, v2si, v2sf, di)},
+  {VAR5 (REINTERP, reinterpretv2si, v8qi, v4hi, v2si, v2sf, di)},
+  {VAR5 (REINTERP, reinterpretv2sf, v8qi, v4hi, v2si, v2sf, di)},
+  {VAR5 (REINTERP, reinterpretdi, v8qi, v4hi, v2si, v2sf, di)},
+  {VAR6 (REINTERP, reinterpretv16qi, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+  {VAR6 (REINTERP, reinterpretv8hi, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+  {VAR6 (REINTERP, reinterpretv4si, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+  {VAR6 (REINTERP, reinterpretv4sf, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+  {VAR6 (REINTERP, reinterpretv2di, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+  {VAR6 (COMBINE, combine, v8qi, v4hi, v2si, v2sf, di, df)},
+
+  {VAR3 (BINOP, saddl, v8qi, v4hi, v2si)},
+  {VAR3 (BINOP, uaddl, v8qi, v4hi, v2si)},
+  {VAR3 (BINOP, saddl2, v16qi, v8hi, v4si)},
+  {VAR3 (BINOP, uaddl2, v16qi, v8hi, v4si)},
+  {VAR3 (BINOP, saddw, v8qi, v4hi, v2si)},
+  {VAR3 (BINOP, uaddw, v8qi, v4hi, v2si)},
+  {VAR3 (BINOP, saddw2, v16qi, v8hi, v4si)},
+  {VAR3 (BINOP, uaddw2, v16qi, v8hi, v4si)},
+  {VAR6 (BINOP, shadd, v8qi, v4hi, v2si, v16qi, v8hi, v4si)},
+  {VAR6 (BINOP, uhadd, v8qi, v4hi, v2si, v16qi, v8hi, v4si)},
+  {VAR6 (BINOP, srhadd, v8qi, v4hi, v2si, v16qi, v8hi, v4si)},
+  {VAR6 (BINOP, urhadd, v8qi, v4hi, v2si, v16qi, v8hi, v4si)},
+  {VAR3 (BINOP, addhn, v8hi, v4si, v2di)},
+  {VAR3 (BINOP, raddhn, v8hi, v4si, v2di)},
+  {VAR3 (TERNOP, addhn2, v8hi, v4si, v2di)},
+  {VAR3 (TERNOP, raddhn2, v8hi, v4si, v2di)},
+  {VAR3 (BINOP, ssubl, v8qi, v4hi, v2si)},
+  {VAR3 (BINOP, usubl, v8qi, v4hi, v2si)},
+  {VAR3 (BINOP, ssubl2, v16qi, v8hi, v4si) },
+  {VAR3 (BINOP, usubl2, v16qi, v8hi, v4si) },
+  {VAR3 (BINOP, ssubw, v8qi, v4hi, v2si) },
+  {VAR3 (BINOP, usubw, v8qi, v4hi, v2si) },
+  {VAR3 (BINOP, ssubw2, v16qi, v8hi, v4si) },
+  {VAR3 (BINOP, usubw2, v16qi, v8hi, v4si) },
+  {VAR11 (BINOP, sqadd, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+         si, hi, qi)},
+  {VAR11 (BINOP, uqadd, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+         si, hi, qi)},
+  {VAR11 (BINOP, sqsub, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+         si, hi, qi)},
+  {VAR11 (BINOP, uqsub, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+         si, hi, qi)},
+  {VAR11 (BINOP, suqadd, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+         si, hi, qi)},
+  {VAR11 (BINOP, usqadd, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+         si, hi, qi)},
+  {VAR6 (UNOP, sqmovun, di, v8hi, v4si, v2di, si, hi)},
+  {VAR6 (UNOP, sqmovn, di, v8hi, v4si, v2di, si, hi)},
+  {VAR6 (UNOP, uqmovn, di, v8hi, v4si, v2di, si, hi)},
+  {VAR10 (UNOP, sqabs, v8qi, v4hi, v2si, v16qi, v8hi, v4si, v2di, si, hi, qi)},
+  {VAR10 (UNOP, sqneg, v8qi, v4hi, v2si, v16qi, v8hi, v4si, v2di, si, hi, qi)},
+  {VAR2 (BINOP, pmul, v8qi, v16qi)},
+  {VAR4 (TERNOP, sqdmlal, v4hi, v2si, si, hi)},
+  {VAR4 (QUADOP, sqdmlal_lane, v4hi, v2si, si, hi) },
+  {VAR2 (QUADOP, sqdmlal_laneq, v4hi, v2si) },
+  {VAR2 (TERNOP, sqdmlal_n, v4hi, v2si) },
+  {VAR2 (TERNOP, sqdmlal2, v8hi, v4si)},
+  {VAR2 (QUADOP, sqdmlal2_lane, v8hi, v4si) },
+  {VAR2 (QUADOP, sqdmlal2_laneq, v8hi, v4si) },
+  {VAR2 (TERNOP, sqdmlal2_n, v8hi, v4si) },
+  {VAR4 (TERNOP, sqdmlsl, v4hi, v2si, si, hi)},
+  {VAR4 (QUADOP, sqdmlsl_lane, v4hi, v2si, si, hi) },
+  {VAR2 (QUADOP, sqdmlsl_laneq, v4hi, v2si) },
+  {VAR2 (TERNOP, sqdmlsl_n, v4hi, v2si) },
+  {VAR2 (TERNOP, sqdmlsl2, v8hi, v4si)},
+  {VAR2 (QUADOP, sqdmlsl2_lane, v8hi, v4si) },
+  {VAR2 (QUADOP, sqdmlsl2_laneq, v8hi, v4si) },
+  {VAR2 (TERNOP, sqdmlsl2_n, v8hi, v4si) },
+  {VAR4 (BINOP, sqdmull, v4hi, v2si, si, hi)},
+  {VAR4 (TERNOP, sqdmull_lane, v4hi, v2si, si, hi) },
+  {VAR2 (TERNOP, sqdmull_laneq, v4hi, v2si) },
+  {VAR2 (BINOP, sqdmull_n, v4hi, v2si) },
+  {VAR2 (BINOP, sqdmull2, v8hi, v4si) },
+  {VAR2 (TERNOP, sqdmull2_lane, v8hi, v4si) },
+  {VAR2 (TERNOP, sqdmull2_laneq, v8hi, v4si) },
+  {VAR2 (BINOP, sqdmull2_n, v8hi, v4si) },
+  {VAR6 (BINOP, sqdmulh, v4hi, v2si, v8hi, v4si, si, hi)},
+  {VAR6 (BINOP, sqrdmulh, v4hi, v2si, v8hi, v4si, si, hi)},
+  {VAR8 (BINOP, sshl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  {VAR3 (SHIFTIMM, sshll_n, v8qi, v4hi, v2si) },
+  {VAR3 (SHIFTIMM, ushll_n, v8qi, v4hi, v2si) },
+  {VAR3 (SHIFTIMM, sshll2_n, v16qi, v8hi, v4si) },
+  {VAR3 (SHIFTIMM, ushll2_n, v16qi, v8hi, v4si) },
+  {VAR8 (BINOP, ushl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  {VAR8 (BINOP, sshl_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  {VAR8 (BINOP, ushl_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  {VAR11 (BINOP, sqshl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+         si, hi, qi) },
+  {VAR11 (BINOP, uqshl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+         si, hi, qi) },
+  {VAR8 (BINOP, srshl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  {VAR8 (BINOP, urshl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  {VAR11 (BINOP, sqrshl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+         si, hi, qi) },
+  {VAR11 (BINOP, uqrshl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+         si, hi, qi) },
+  {VAR8 (SHIFTIMM, sshr_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  {VAR8 (SHIFTIMM, ushr_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  {VAR8 (SHIFTIMM, srshr_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  {VAR8 (SHIFTIMM, urshr_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  {VAR8 (SHIFTACC, ssra_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  {VAR8 (SHIFTACC, usra_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  {VAR8 (SHIFTACC, srsra_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  {VAR8 (SHIFTACC, ursra_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  {VAR8 (SHIFTINSERT, ssri_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  {VAR8 (SHIFTINSERT, usri_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  {VAR8 (SHIFTINSERT, ssli_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  {VAR8 (SHIFTINSERT, usli_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  {VAR11 (SHIFTIMM, sqshlu_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+         si, hi, qi) },
+  {VAR11 (SHIFTIMM, sqshl_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+         si, hi, qi) },
+  {VAR11 (SHIFTIMM, uqshl_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+         si, hi, qi) },
+  { VAR6 (SHIFTIMM, sqshrun_n, di, v8hi, v4si, v2di, si, hi) },
+  { VAR6 (SHIFTIMM, sqrshrun_n, di, v8hi, v4si, v2di, si, hi) },
+  { VAR6 (SHIFTIMM, sqshrn_n, di, v8hi, v4si, v2di, si, hi) },
+  { VAR6 (SHIFTIMM, uqshrn_n, di, v8hi, v4si, v2di, si, hi) },
+  { VAR6 (SHIFTIMM, sqrshrn_n, di, v8hi, v4si, v2di, si, hi) },
+  { VAR6 (SHIFTIMM, uqrshrn_n, di, v8hi, v4si, v2di, si, hi) },
+  { VAR8 (BINOP, cmeq, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  { VAR8 (BINOP, cmge, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  { VAR8 (BINOP, cmgt, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  { VAR8 (BINOP, cmle, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  { VAR8 (BINOP, cmlt, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  { VAR8 (BINOP, cmhs, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  { VAR8 (BINOP, cmhi, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  { VAR8 (BINOP, cmtst, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+  { VAR6 (TERNOP, sqdmulh_lane, v4hi, v2si, v8hi, v4si, si, hi) },
+  { VAR6 (TERNOP, sqrdmulh_lane, v4hi, v2si, v8hi, v4si, si, hi) },
+  { VAR3 (BINOP, addp, v8qi, v4hi, v2si) },
+  { VAR1 (UNOP, addp, di) },
+  { VAR11 (BINOP, dup_lane, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+         si, hi, qi) },
+  { VAR3 (BINOP, fmax, v2sf, v4sf, v2df) },
+  { VAR3 (BINOP, fmin, v2sf, v4sf, v2df) },
+  { VAR6 (BINOP, smax, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
+  { VAR6 (BINOP, smin, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
+  { VAR6 (BINOP, umax, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
+  { VAR6 (BINOP, umin, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
+  { VAR3 (UNOP, sqrt, v2sf, v4sf, v2df) },
+  {VAR12 (LOADSTRUCT, ld2,
+        v8qi, v4hi, v2si, v2sf, di, df, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+  {VAR12 (LOADSTRUCT, ld3,
+        v8qi, v4hi, v2si, v2sf, di, df, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+  {VAR12 (LOADSTRUCT, ld4,
+        v8qi, v4hi, v2si, v2sf, di, df, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+  {VAR12 (STORESTRUCT, st2,
+        v8qi, v4hi, v2si, v2sf, di, df, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+  {VAR12 (STORESTRUCT, st3,
+        v8qi, v4hi, v2si, v2sf, di, df, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+  {VAR12 (STORESTRUCT, st4,
+        v8qi, v4hi, v2si, v2sf, di, df, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+};
+
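For reference, every entry in aarch64_simd_builtin_data is just one of the VARn macros expanded into an aarch64_simd_builtin_datum initializer. The first BINOP entry, {VAR3 (BINOP, saddl, v8qi, v4hi, v2si)}, preprocesses to:

  {"saddl", AARCH64_SIMD_BINOP, T_V8QI | T_V4HI | T_V2SI,
   { CODE_FOR_aarch64_saddlv8qi, CODE_FOR_aarch64_saddlv4hi,
     CODE_FOR_aarch64_saddlv2si },
   3, 0},

that is: the builtin's base name, its itype classification, the bitmask of supported modes, the per-mode insn codes in the same order as the set bits, the variant count, and a base_fcode of 0 that init_aarch64_simd_builtins fills in below.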
+#undef CF
+#undef VAR1
+#undef VAR2
+#undef VAR3
+#undef VAR4
+#undef VAR5
+#undef VAR6
+#undef VAR7
+#undef VAR8
+#undef VAR9
+#undef VAR10
+#undef VAR11
+#undef VAR12
+
+#define NUM_DREG_TYPES 6
+#define NUM_QREG_TYPES 6
+
+void
+init_aarch64_simd_builtins (void)
+{
+  unsigned int i, fcode = AARCH64_SIMD_BUILTIN_BASE;
+
+  /* Scalar type nodes.  */
+  tree aarch64_simd_intQI_type_node;
+  tree aarch64_simd_intHI_type_node;
+  tree aarch64_simd_polyQI_type_node;
+  tree aarch64_simd_polyHI_type_node;
+  tree aarch64_simd_intSI_type_node;
+  tree aarch64_simd_intDI_type_node;
+  tree aarch64_simd_float_type_node;
+  tree aarch64_simd_double_type_node;
+
+  /* Pointer to scalar type nodes.  */
+  tree intQI_pointer_node;
+  tree intHI_pointer_node;
+  tree intSI_pointer_node;
+  tree intDI_pointer_node;
+  tree float_pointer_node;
+  tree double_pointer_node;
+
+  /* Const scalar type nodes.  */
+  tree const_intQI_node;
+  tree const_intHI_node;
+  tree const_intSI_node;
+  tree const_intDI_node;
+  tree const_float_node;
+  tree const_double_node;
+
+  /* Pointer to const scalar type nodes.  */
+  tree const_intQI_pointer_node;
+  tree const_intHI_pointer_node;
+  tree const_intSI_pointer_node;
+  tree const_intDI_pointer_node;
+  tree const_float_pointer_node;
+  tree const_double_pointer_node;
+
+  /* Vector type nodes.  */
+  tree V8QI_type_node;
+  tree V4HI_type_node;
+  tree V2SI_type_node;
+  tree V2SF_type_node;
+  tree V16QI_type_node;
+  tree V8HI_type_node;
+  tree V4SI_type_node;
+  tree V4SF_type_node;
+  tree V2DI_type_node;
+  tree V2DF_type_node;
+
+  /* Scalar unsigned type nodes.  */
+  tree intUQI_type_node;
+  tree intUHI_type_node;
+  tree intUSI_type_node;
+  tree intUDI_type_node;
+
+  /* Opaque integer types for structures of vectors.  */
+  tree intEI_type_node;
+  tree intOI_type_node;
+  tree intCI_type_node;
+  tree intXI_type_node;
+
+  /* Pointer to vector type nodes.  */
+  tree V8QI_pointer_node;
+  tree V4HI_pointer_node;
+  tree V2SI_pointer_node;
+  tree V2SF_pointer_node;
+  tree V16QI_pointer_node;
+  tree V8HI_pointer_node;
+  tree V4SI_pointer_node;
+  tree V4SF_pointer_node;
+  tree V2DI_pointer_node;
+  tree V2DF_pointer_node;
+
+  /* Operations which return results as pairs.  */
+  tree void_ftype_pv8qi_v8qi_v8qi;
+  tree void_ftype_pv4hi_v4hi_v4hi;
+  tree void_ftype_pv2si_v2si_v2si;
+  tree void_ftype_pv2sf_v2sf_v2sf;
+  tree void_ftype_pdi_di_di;
+  tree void_ftype_pv16qi_v16qi_v16qi;
+  tree void_ftype_pv8hi_v8hi_v8hi;
+  tree void_ftype_pv4si_v4si_v4si;
+  tree void_ftype_pv4sf_v4sf_v4sf;
+  tree void_ftype_pv2di_v2di_v2di;
+  tree void_ftype_pv2df_v2df_v2df;
+
+  tree reinterp_ftype_dreg[NUM_DREG_TYPES][NUM_DREG_TYPES];
+  tree reinterp_ftype_qreg[NUM_QREG_TYPES][NUM_QREG_TYPES];
+  tree dreg_types[NUM_DREG_TYPES], qreg_types[NUM_QREG_TYPES];
+
+  /* Create distinguished type nodes for AARCH64_SIMD vector element types,
+     and pointers to values of such types, so we can detect them later.  */
+  aarch64_simd_intQI_type_node =
+    make_signed_type (GET_MODE_PRECISION (QImode));
+  aarch64_simd_intHI_type_node =
+    make_signed_type (GET_MODE_PRECISION (HImode));
+  aarch64_simd_polyQI_type_node =
+    make_signed_type (GET_MODE_PRECISION (QImode));
+  aarch64_simd_polyHI_type_node =
+    make_signed_type (GET_MODE_PRECISION (HImode));
+  aarch64_simd_intSI_type_node =
+    make_signed_type (GET_MODE_PRECISION (SImode));
+  aarch64_simd_intDI_type_node =
+    make_signed_type (GET_MODE_PRECISION (DImode));
+  aarch64_simd_float_type_node = make_node (REAL_TYPE);
+  aarch64_simd_double_type_node = make_node (REAL_TYPE);
+  TYPE_PRECISION (aarch64_simd_float_type_node) = FLOAT_TYPE_SIZE;
+  TYPE_PRECISION (aarch64_simd_double_type_node) = DOUBLE_TYPE_SIZE;
+  layout_type (aarch64_simd_float_type_node);
+  layout_type (aarch64_simd_double_type_node);
+
+  /* Define typedefs which exactly correspond to the modes we are basing vector
+     types on.  If you change these names you'll need to change
+     the table used by aarch64_mangle_type too.  */
+  (*lang_hooks.types.register_builtin_type) (aarch64_simd_intQI_type_node,
+                                            "__builtin_aarch64_simd_qi");
+  (*lang_hooks.types.register_builtin_type) (aarch64_simd_intHI_type_node,
+                                            "__builtin_aarch64_simd_hi");
+  (*lang_hooks.types.register_builtin_type) (aarch64_simd_intSI_type_node,
+                                            "__builtin_aarch64_simd_si");
+  (*lang_hooks.types.register_builtin_type) (aarch64_simd_float_type_node,
+                                            "__builtin_aarch64_simd_sf");
+  (*lang_hooks.types.register_builtin_type) (aarch64_simd_intDI_type_node,
+                                            "__builtin_aarch64_simd_di");
+  (*lang_hooks.types.register_builtin_type) (aarch64_simd_double_type_node,
+                                            "__builtin_aarch64_simd_df");
+  (*lang_hooks.types.register_builtin_type) (aarch64_simd_polyQI_type_node,
+                                            "__builtin_aarch64_simd_poly8");
+  (*lang_hooks.types.register_builtin_type) (aarch64_simd_polyHI_type_node,
+                                            "__builtin_aarch64_simd_poly16");
+
+  intQI_pointer_node = build_pointer_type (aarch64_simd_intQI_type_node);
+  intHI_pointer_node = build_pointer_type (aarch64_simd_intHI_type_node);
+  intSI_pointer_node = build_pointer_type (aarch64_simd_intSI_type_node);
+  intDI_pointer_node = build_pointer_type (aarch64_simd_intDI_type_node);
+  float_pointer_node = build_pointer_type (aarch64_simd_float_type_node);
+  double_pointer_node = build_pointer_type (aarch64_simd_double_type_node);
+
+  /* Next create constant-qualified versions of the above types.  */
+  const_intQI_node = build_qualified_type (aarch64_simd_intQI_type_node,
+                                          TYPE_QUAL_CONST);
+  const_intHI_node = build_qualified_type (aarch64_simd_intHI_type_node,
+                                          TYPE_QUAL_CONST);
+  const_intSI_node = build_qualified_type (aarch64_simd_intSI_type_node,
+                                          TYPE_QUAL_CONST);
+  const_intDI_node = build_qualified_type (aarch64_simd_intDI_type_node,
+                                          TYPE_QUAL_CONST);
+  const_float_node = build_qualified_type (aarch64_simd_float_type_node,
+                                          TYPE_QUAL_CONST);
+  const_double_node = build_qualified_type (aarch64_simd_double_type_node,
+                                           TYPE_QUAL_CONST);
+
+  const_intQI_pointer_node = build_pointer_type (const_intQI_node);
+  const_intHI_pointer_node = build_pointer_type (const_intHI_node);
+  const_intSI_pointer_node = build_pointer_type (const_intSI_node);
+  const_intDI_pointer_node = build_pointer_type (const_intDI_node);
+  const_float_pointer_node = build_pointer_type (const_float_node);
+  const_double_pointer_node = build_pointer_type (const_double_node);
+
+  /* Now create vector types based on our AARCH64 SIMD element types.  */
+  /* 64-bit vectors.  */
+  V8QI_type_node =
+    build_vector_type_for_mode (aarch64_simd_intQI_type_node, V8QImode);
+  V4HI_type_node =
+    build_vector_type_for_mode (aarch64_simd_intHI_type_node, V4HImode);
+  V2SI_type_node =
+    build_vector_type_for_mode (aarch64_simd_intSI_type_node, V2SImode);
+  V2SF_type_node =
+    build_vector_type_for_mode (aarch64_simd_float_type_node, V2SFmode);
+  /* 128-bit vectors.  */
+  V16QI_type_node =
+    build_vector_type_for_mode (aarch64_simd_intQI_type_node, V16QImode);
+  V8HI_type_node =
+    build_vector_type_for_mode (aarch64_simd_intHI_type_node, V8HImode);
+  V4SI_type_node =
+    build_vector_type_for_mode (aarch64_simd_intSI_type_node, V4SImode);
+  V4SF_type_node =
+    build_vector_type_for_mode (aarch64_simd_float_type_node, V4SFmode);
+  V2DI_type_node =
+    build_vector_type_for_mode (aarch64_simd_intDI_type_node, V2DImode);
+  V2DF_type_node =
+    build_vector_type_for_mode (aarch64_simd_double_type_node, V2DFmode);
+
+  /* Unsigned integer types for various mode sizes.  */
+  intUQI_type_node = make_unsigned_type (GET_MODE_PRECISION (QImode));
+  intUHI_type_node = make_unsigned_type (GET_MODE_PRECISION (HImode));
+  intUSI_type_node = make_unsigned_type (GET_MODE_PRECISION (SImode));
+  intUDI_type_node = make_unsigned_type (GET_MODE_PRECISION (DImode));
+
+  (*lang_hooks.types.register_builtin_type) (intUQI_type_node,
+                                            "__builtin_aarch64_simd_uqi");
+  (*lang_hooks.types.register_builtin_type) (intUHI_type_node,
+                                            "__builtin_aarch64_simd_uhi");
+  (*lang_hooks.types.register_builtin_type) (intUSI_type_node,
+                                            "__builtin_aarch64_simd_usi");
+  (*lang_hooks.types.register_builtin_type) (intUDI_type_node,
+                                            "__builtin_aarch64_simd_udi");
+
+  /* Opaque integer types for structures of vectors.  */
+  intEI_type_node = make_signed_type (GET_MODE_PRECISION (EImode));
+  intOI_type_node = make_signed_type (GET_MODE_PRECISION (OImode));
+  intCI_type_node = make_signed_type (GET_MODE_PRECISION (CImode));
+  intXI_type_node = make_signed_type (GET_MODE_PRECISION (XImode));
+
+  (*lang_hooks.types.register_builtin_type) (intTI_type_node,
+                                            "__builtin_aarch64_simd_ti");
+  (*lang_hooks.types.register_builtin_type) (intEI_type_node,
+                                            "__builtin_aarch64_simd_ei");
+  (*lang_hooks.types.register_builtin_type) (intOI_type_node,
+                                            "__builtin_aarch64_simd_oi");
+  (*lang_hooks.types.register_builtin_type) (intCI_type_node,
+                                            "__builtin_aarch64_simd_ci");
+  (*lang_hooks.types.register_builtin_type) (intXI_type_node,
+                                            "__builtin_aarch64_simd_xi");
+
+  /* Pointers to vector types.  */
+  V8QI_pointer_node = build_pointer_type (V8QI_type_node);
+  V4HI_pointer_node = build_pointer_type (V4HI_type_node);
+  V2SI_pointer_node = build_pointer_type (V2SI_type_node);
+  V2SF_pointer_node = build_pointer_type (V2SF_type_node);
+  V16QI_pointer_node = build_pointer_type (V16QI_type_node);
+  V8HI_pointer_node = build_pointer_type (V8HI_type_node);
+  V4SI_pointer_node = build_pointer_type (V4SI_type_node);
+  V4SF_pointer_node = build_pointer_type (V4SF_type_node);
+  V2DI_pointer_node = build_pointer_type (V2DI_type_node);
+  V2DF_pointer_node = build_pointer_type (V2DF_type_node);
+
+  /* Operations which return results as pairs.  */
+  void_ftype_pv8qi_v8qi_v8qi =
+    build_function_type_list (void_type_node, V8QI_pointer_node,
+                             V8QI_type_node, V8QI_type_node, NULL);
+  void_ftype_pv4hi_v4hi_v4hi =
+    build_function_type_list (void_type_node, V4HI_pointer_node,
+                             V4HI_type_node, V4HI_type_node, NULL);
+  void_ftype_pv2si_v2si_v2si =
+    build_function_type_list (void_type_node, V2SI_pointer_node,
+                             V2SI_type_node, V2SI_type_node, NULL);
+  void_ftype_pv2sf_v2sf_v2sf =
+    build_function_type_list (void_type_node, V2SF_pointer_node,
+                             V2SF_type_node, V2SF_type_node, NULL);
+  void_ftype_pdi_di_di =
+    build_function_type_list (void_type_node, intDI_pointer_node,
+                             aarch64_simd_intDI_type_node,
+                             aarch64_simd_intDI_type_node, NULL);
+  void_ftype_pv16qi_v16qi_v16qi =
+    build_function_type_list (void_type_node, V16QI_pointer_node,
+                             V16QI_type_node, V16QI_type_node, NULL);
+  void_ftype_pv8hi_v8hi_v8hi =
+    build_function_type_list (void_type_node, V8HI_pointer_node,
+                             V8HI_type_node, V8HI_type_node, NULL);
+  void_ftype_pv4si_v4si_v4si =
+    build_function_type_list (void_type_node, V4SI_pointer_node,
+                             V4SI_type_node, V4SI_type_node, NULL);
+  void_ftype_pv4sf_v4sf_v4sf =
+    build_function_type_list (void_type_node, V4SF_pointer_node,
+                             V4SF_type_node, V4SF_type_node, NULL);
+  void_ftype_pv2di_v2di_v2di =
+    build_function_type_list (void_type_node, V2DI_pointer_node,
+                             V2DI_type_node, V2DI_type_node, NULL);
+  void_ftype_pv2df_v2df_v2df =
+    build_function_type_list (void_type_node, V2DF_pointer_node,
+                             V2DF_type_node, V2DF_type_node, NULL);
+
+  dreg_types[0] = V8QI_type_node;
+  dreg_types[1] = V4HI_type_node;
+  dreg_types[2] = V2SI_type_node;
+  dreg_types[3] = V2SF_type_node;
+  dreg_types[4] = aarch64_simd_intDI_type_node;
+  dreg_types[5] = aarch64_simd_double_type_node;
+
+  qreg_types[0] = V16QI_type_node;
+  qreg_types[1] = V8HI_type_node;
+  qreg_types[2] = V4SI_type_node;
+  qreg_types[3] = V4SF_type_node;
+  qreg_types[4] = V2DI_type_node;
+  qreg_types[5] = V2DF_type_node;
+
+  /* If NUM_DREG_TYPES != NUM_QREG_TYPES, we will need separate nested loops
+     for qreg and dreg reinterp inits.  */
+  for (i = 0; i < NUM_DREG_TYPES; i++)
+    {
+      int j;
+      for (j = 0; j < NUM_DREG_TYPES; j++)
+       {
+         reinterp_ftype_dreg[i][j]
+           = build_function_type_list (dreg_types[i], dreg_types[j], NULL);
+         reinterp_ftype_qreg[i][j]
+           = build_function_type_list (qreg_types[i], qreg_types[j], NULL);
+       }
+    }
+
+  for (i = 0; i < ARRAY_SIZE (aarch64_simd_builtin_data); i++)
+    {
+      aarch64_simd_builtin_datum *d = &aarch64_simd_builtin_data[i];
+      unsigned int j, codeidx = 0;
+
+      d->base_fcode = fcode;
+
+      for (j = 0; j < T_MAX; j++)
+       {
+         const char *const modenames[] = {
+           "v8qi", "v4hi", "v2si", "v2sf", "di", "df",
+           "v16qi", "v8hi", "v4si", "v4sf", "v2di", "v2df",
+           "ti", "ei", "oi", "xi", "si", "hi", "qi"
+         };
+         char namebuf[60];
+         tree ftype = NULL;
+         enum insn_code icode;
+         int is_load = 0;
+         int is_store = 0;
+
+         /* Skip if particular mode not supported.  */
+         if ((d->bits & (1 << j)) == 0)
+           continue;
+
+         icode = d->codes[codeidx++];
+
+         switch (d->itype)
+           {
+           case AARCH64_SIMD_LOAD1:
+           case AARCH64_SIMD_LOAD1LANE:
+           case AARCH64_SIMD_LOADSTRUCTLANE:
+           case AARCH64_SIMD_LOADSTRUCT:
+             is_load = 1;
+             /* Fall through.  */
+           case AARCH64_SIMD_STORE1:
+           case AARCH64_SIMD_STORE1LANE:
+           case AARCH64_SIMD_STORESTRUCTLANE:
+           case AARCH64_SIMD_STORESTRUCT:
+             if (!is_load)
+               is_store = 1;
+             /* Fall through.  */
+           case AARCH64_SIMD_UNOP:
+           case AARCH64_SIMD_BINOP:
+           case AARCH64_SIMD_LOGICBINOP:
+           case AARCH64_SIMD_SHIFTINSERT:
+           case AARCH64_SIMD_TERNOP:
+           case AARCH64_SIMD_QUADOP:
+           case AARCH64_SIMD_GETLANE:
+           case AARCH64_SIMD_SETLANE:
+           case AARCH64_SIMD_CREATE:
+           case AARCH64_SIMD_DUP:
+           case AARCH64_SIMD_DUPLANE:
+           case AARCH64_SIMD_SHIFTIMM:
+           case AARCH64_SIMD_SHIFTACC:
+           case AARCH64_SIMD_COMBINE:
+           case AARCH64_SIMD_SPLIT:
+           case AARCH64_SIMD_CONVERT:
+           case AARCH64_SIMD_FIXCONV:
+           case AARCH64_SIMD_LANEMUL:
+           case AARCH64_SIMD_LANEMULL:
+           case AARCH64_SIMD_LANEMULH:
+           case AARCH64_SIMD_LANEMAC:
+           case AARCH64_SIMD_SCALARMUL:
+           case AARCH64_SIMD_SCALARMULL:
+           case AARCH64_SIMD_SCALARMULH:
+           case AARCH64_SIMD_SCALARMAC:
+           case AARCH64_SIMD_SELECT:
+           case AARCH64_SIMD_VTBL:
+           case AARCH64_SIMD_VTBX:
+             {
+               int k;
+               tree return_type = void_type_node, args = void_list_node;
+
+               /* Build a function type directly from the insn_data for this
+                  builtin.  The build_function_type() function takes care of
+                  removing duplicates for us.  */
+               for (k = insn_data[icode].n_operands - 1; k >= 0; k--)
+                 {
+                   tree eltype;
+
+                   /* Skip an internal operand for vget_{low, high}.  */
+                   if (k == 2 && d->itype == AARCH64_SIMD_SPLIT)
+                     continue;
+
+                   if (is_load && k == 1)
+                     {
+                       /* AdvSIMD load patterns always have the memory operand
+                          (a DImode pointer) in the operand 1 position.  We
+                          want a const pointer to the element type in that
+                          position.  */
+                       gcc_assert (insn_data[icode].operand[k].mode ==
+                                   DImode);
+
+                       switch (1 << j)
+                         {
+                         case T_V8QI:
+                         case T_V16QI:
+                           eltype = const_intQI_pointer_node;
+                           break;
+
+                         case T_V4HI:
+                         case T_V8HI:
+                           eltype = const_intHI_pointer_node;
+                           break;
+
+                         case T_V2SI:
+                         case T_V4SI:
+                           eltype = const_intSI_pointer_node;
+                           break;
+
+                         case T_V2SF:
+                         case T_V4SF:
+                           eltype = const_float_pointer_node;
+                           break;
+
+                         case T_DI:
+                         case T_V2DI:
+                           eltype = const_intDI_pointer_node;
+                           break;
+
+                         case T_DF:
+                         case T_V2DF:
+                           eltype = const_double_pointer_node;
+                           break;
+
+                         default:
+                           gcc_unreachable ();
+                         }
+                     }
+                   else if (is_store && k == 0)
+                     {
+                       /* Similarly, AdvSIMD store patterns use operand 0 as
+                          the memory location to store to (a DImode pointer).
+                          Use a pointer to the element type of the store in
+                          that position.  */
+                       gcc_assert (insn_data[icode].operand[k].mode ==
+                                   DImode);
+
+                       switch (1 << j)
+                         {
+                         case T_V8QI:
+                         case T_V16QI:
+                           eltype = intQI_pointer_node;
+                           break;
+
+                         case T_V4HI:
+                         case T_V8HI:
+                           eltype = intHI_pointer_node;
+                           break;
+
+                         case T_V2SI:
+                         case T_V4SI:
+                           eltype = intSI_pointer_node;
+                           break;
+
+                         case T_V2SF:
+                         case T_V4SF:
+                           eltype = float_pointer_node;
+                           break;
+
+                         case T_DI:
+                         case T_V2DI:
+                           eltype = intDI_pointer_node;
+                           break;
+
+                         case T_DF:
+                         case T_V2DF:
+                           eltype = double_pointer_node;
+                           break;
+
+                         default:
+                           gcc_unreachable ();
+                         }
+                     }
+                   else
+                     {
+                       switch (insn_data[icode].operand[k].mode)
+                         {
+                         case VOIDmode:
+                           eltype = void_type_node;
+                           break;
+                           /* Scalars.  */
+                         case QImode:
+                           eltype = aarch64_simd_intQI_type_node;
+                           break;
+                         case HImode:
+                           eltype = aarch64_simd_intHI_type_node;
+                           break;
+                         case SImode:
+                           eltype = aarch64_simd_intSI_type_node;
+                           break;
+                         case SFmode:
+                           eltype = aarch64_simd_float_type_node;
+                           break;
+                         case DFmode:
+                           eltype = aarch64_simd_double_type_node;
+                           break;
+                         case DImode:
+                           eltype = aarch64_simd_intDI_type_node;
+                           break;
+                         case TImode:
+                           eltype = intTI_type_node;
+                           break;
+                         case EImode:
+                           eltype = intEI_type_node;
+                           break;
+                         case OImode:
+                           eltype = intOI_type_node;
+                           break;
+                         case CImode:
+                           eltype = intCI_type_node;
+                           break;
+                         case XImode:
+                           eltype = intXI_type_node;
+                           break;
+                           /* 64-bit vectors.  */
+                         case V8QImode:
+                           eltype = V8QI_type_node;
+                           break;
+                         case V4HImode:
+                           eltype = V4HI_type_node;
+                           break;
+                         case V2SImode:
+                           eltype = V2SI_type_node;
+                           break;
+                         case V2SFmode:
+                           eltype = V2SF_type_node;
+                           break;
+                           /* 128-bit vectors.  */
+                         case V16QImode:
+                           eltype = V16QI_type_node;
+                           break;
+                         case V8HImode:
+                           eltype = V8HI_type_node;
+                           break;
+                         case V4SImode:
+                           eltype = V4SI_type_node;
+                           break;
+                         case V4SFmode:
+                           eltype = V4SF_type_node;
+                           break;
+                         case V2DImode:
+                           eltype = V2DI_type_node;
+                           break;
+                         case V2DFmode:
+                           eltype = V2DF_type_node;
+                           break;
+                         default:
+                           gcc_unreachable ();
+                         }
+                     }
+
+                   if (k == 0 && !is_store)
+                     return_type = eltype;
+                   else
+                     args = tree_cons (NULL_TREE, eltype, args);
+                 }
+
+               ftype = build_function_type (return_type, args);
+             }
+             break;
+
+           case AARCH64_SIMD_RESULTPAIR:
+             {
+               switch (insn_data[icode].operand[1].mode)
+                 {
+                 case V8QImode:
+                   ftype = void_ftype_pv8qi_v8qi_v8qi;
+                   break;
+                 case V4HImode:
+                   ftype = void_ftype_pv4hi_v4hi_v4hi;
+                   break;
+                 case V2SImode:
+                   ftype = void_ftype_pv2si_v2si_v2si;
+                   break;
+                 case V2SFmode:
+                   ftype = void_ftype_pv2sf_v2sf_v2sf;
+                   break;
+                 case DImode:
+                   ftype = void_ftype_pdi_di_di;
+                   break;
+                 case V16QImode:
+                   ftype = void_ftype_pv16qi_v16qi_v16qi;
+                   break;
+                 case V8HImode:
+                   ftype = void_ftype_pv8hi_v8hi_v8hi;
+                   break;
+                 case V4SImode:
+                   ftype = void_ftype_pv4si_v4si_v4si;
+                   break;
+                 case V4SFmode:
+                   ftype = void_ftype_pv4sf_v4sf_v4sf;
+                   break;
+                 case V2DImode:
+                   ftype = void_ftype_pv2di_v2di_v2di;
+                   break;
+                 case V2DFmode:
+                   ftype = void_ftype_pv2df_v2df_v2df;
+                   break;
+                 default:
+                   gcc_unreachable ();
+                 }
+             }
+             break;
+
+           case AARCH64_SIMD_REINTERP:
+             {
+               /* We iterate over 6 doubleword types, then 6 quadword
+                  types.  */
+               int rhs_d = j % NUM_DREG_TYPES;
+               int rhs_q = (j - NUM_DREG_TYPES) % NUM_QREG_TYPES;
+               switch (insn_data[icode].operand[0].mode)
+                 {
+                 case V8QImode:
+                   ftype = reinterp_ftype_dreg[0][rhs_d];
+                   break;
+                 case V4HImode:
+                   ftype = reinterp_ftype_dreg[1][rhs_d];
+                   break;
+                 case V2SImode:
+                   ftype = reinterp_ftype_dreg[2][rhs_d];
+                   break;
+                 case V2SFmode:
+                   ftype = reinterp_ftype_dreg[3][rhs_d];
+                   break;
+                 case DImode:
+                   ftype = reinterp_ftype_dreg[4][rhs_d];
+                   break;
+                 case DFmode:
+                   ftype = reinterp_ftype_dreg[5][rhs_d];
+                   break;
+                 case V16QImode:
+                   ftype = reinterp_ftype_qreg[0][rhs_q];
+                   break;
+                 case V8HImode:
+                   ftype = reinterp_ftype_qreg[1][rhs_q];
+                   break;
+                 case V4SImode:
+                   ftype = reinterp_ftype_qreg[2][rhs_q];
+                   break;
+                 case V4SFmode:
+                   ftype = reinterp_ftype_qreg[3][rhs_q];
+                   break;
+                 case V2DImode:
+                   ftype = reinterp_ftype_qreg[4][rhs_q];
+                   break;
+                 case V2DFmode:
+                   ftype = reinterp_ftype_qreg[5][rhs_q];
+                   break;
+                 default:
+                   gcc_unreachable ();
+                 }
+             }
+             break;
+
+           default:
+             gcc_unreachable ();
+           }
+
+         gcc_assert (ftype != NULL);
+
+         snprintf (namebuf, sizeof (namebuf), "__builtin_aarch64_%s%s",
+                   d->name, modenames[j]);
+
+         add_builtin_function (namebuf, ftype, fcode++, BUILT_IN_MD, NULL,
+                               NULL_TREE);
+       }
+    }
+}
+
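To make the name-generation loop concrete: for the {VAR6 (CREATE, create, v8qi, v4hi, v2si, v2sf, di, df)} entry the set bits select modenames[0] through modenames[5], so six builtins are registered, with consecutive function codes starting at the entry's base_fcode and insn codes taken from codes[] in the same order:

  __builtin_aarch64_createv8qi   __builtin_aarch64_createv4hi
  __builtin_aarch64_createv2si   __builtin_aarch64_createv2sf
  __builtin_aarch64_createdi     __builtin_aarch64_createdf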
+static int
+aarch64_simd_builtin_compare (const void *a, const void *b)
+{
+  const aarch64_simd_builtin_datum *const key =
+    (const aarch64_simd_builtin_datum *) a;
+  const aarch64_simd_builtin_datum *const memb =
+    (const aarch64_simd_builtin_datum *) b;
+  unsigned int soughtcode = key->base_fcode;
+
+  if (soughtcode >= memb->base_fcode
+      && soughtcode < memb->base_fcode + memb->num_vars)
+    return 0;
+  else if (soughtcode < memb->base_fcode)
+    return -1;
+  else
+    return 1;
+}
+
+
+static enum insn_code
+locate_simd_builtin_icode (int fcode, aarch64_simd_itype * itype)
+{
+  aarch64_simd_builtin_datum key
+    = { NULL, (aarch64_simd_itype) 0, 0, {CODE_FOR_nothing}, 0, 0};
+  aarch64_simd_builtin_datum *found;
+  int idx;
+
+  key.base_fcode = fcode;
+  found = (aarch64_simd_builtin_datum *)
+    bsearch (&key, &aarch64_simd_builtin_data[0],
+            ARRAY_SIZE (aarch64_simd_builtin_data),
+            sizeof (aarch64_simd_builtin_data[0]),
+            aarch64_simd_builtin_compare);
+  gcc_assert (found);
+  idx = fcode - (int) found->base_fcode;
+  gcc_assert (idx >= 0 && idx < T_MAX && idx < (int) found->num_vars);
+
+  if (itype)
+    *itype = found->itype;
+
+  return found->codes[idx];
+}
+
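Continuing that example: function codes are handed out sequentially in table order, so the base_fcode ranges are disjoint and increasing, which is what makes the bsearch above valid. Assuming the create entry was registered at some base_fcode B, a lookup of B + 3 resolves as follows (illustration only):

  /* aarch64_simd_builtin_compare matches B + 3 against [B, B + 6),
     so idx = (B + 3) - B = 3, and
       locate_simd_builtin_icode (B + 3, &itype)
     returns CODE_FOR_aarch64_createv2sf with itype == AARCH64_SIMD_CREATE
     (v2sf being the fourth mode listed for the create entry).  */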
+typedef enum
+{
+  SIMD_ARG_COPY_TO_REG,
+  SIMD_ARG_CONSTANT,
+  SIMD_ARG_STOP
+} builtin_simd_arg;
+
+#define SIMD_MAX_BUILTIN_ARGS 5
+
+static rtx
+aarch64_simd_expand_args (rtx target, int icode, int have_retval,
+                         tree exp, ...)
+{
+  va_list ap;
+  rtx pat;
+  tree arg[SIMD_MAX_BUILTIN_ARGS];
+  rtx op[SIMD_MAX_BUILTIN_ARGS];
+  enum machine_mode tmode = insn_data[icode].operand[0].mode;
+  enum machine_mode mode[SIMD_MAX_BUILTIN_ARGS];
+  int argc = 0;
+
+  if (have_retval
+      && (!target
+         || GET_MODE (target) != tmode
+         || !(*insn_data[icode].operand[0].predicate) (target, tmode)))
+    target = gen_reg_rtx (tmode);
+
+  va_start (ap, exp);
+
+  for (;;)
+    {
+      builtin_simd_arg thisarg = (builtin_simd_arg) va_arg (ap, int);
+
+      if (thisarg == SIMD_ARG_STOP)
+       break;
+      else
+       {
+         arg[argc] = CALL_EXPR_ARG (exp, argc);
+         op[argc] = expand_normal (arg[argc]);
+         mode[argc] = insn_data[icode].operand[argc + have_retval].mode;
+
+         switch (thisarg)
+           {
+           case SIMD_ARG_COPY_TO_REG:
+             /*gcc_assert (GET_MODE (op[argc]) == mode[argc]); */
+             if (!(*insn_data[icode].operand[argc + have_retval].predicate)
+                 (op[argc], mode[argc]))
+               op[argc] = copy_to_mode_reg (mode[argc], op[argc]);
+             break;
+
+           case SIMD_ARG_CONSTANT:
+             if (!(*insn_data[icode].operand[argc + have_retval].predicate)
+                 (op[argc], mode[argc]))
+               error_at (EXPR_LOCATION (exp), "incompatible type for argument %d, "
+                      "expected %<const int%>", argc + 1);
+             break;
+
+           case SIMD_ARG_STOP:
+             gcc_unreachable ();
+           }
+
+         argc++;
+       }
+    }
+
+  va_end (ap);
+
+  if (have_retval)
+    switch (argc)
+      {
+      case 1:
+       pat = GEN_FCN (icode) (target, op[0]);
+       break;
+
+      case 2:
+       pat = GEN_FCN (icode) (target, op[0], op[1]);
+       break;
+
+      case 3:
+       pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
+       break;
+
+      case 4:
+       pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
+       break;
+
+      case 5:
+       pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
+       break;
+
+      default:
+       gcc_unreachable ();
+      }
+  else
+    switch (argc)
+      {
+      case 1:
+       pat = GEN_FCN (icode) (op[0]);
+       break;
+
+      case 2:
+       pat = GEN_FCN (icode) (op[0], op[1]);
+       break;
+
+      case 3:
+       pat = GEN_FCN (icode) (op[0], op[1], op[2]);
+       break;
+
+      case 4:
+       pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
+       break;
+
+      case 5:
+       pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
+       break;
+
+      default:
+       gcc_unreachable ();
+      }
+
+  if (!pat)
+    return 0;
+
+  emit_insn (pat);
+
+  return target;
+}
+
+/* Expand an AArch64 AdvSIMD builtin (intrinsic).  */
+rtx
+aarch64_simd_expand_builtin (int fcode, tree exp, rtx target)
+{
+  aarch64_simd_itype itype;
+  enum insn_code icode = locate_simd_builtin_icode (fcode, &itype);
+
+  switch (itype)
+    {
+    case AARCH64_SIMD_UNOP:
+      return aarch64_simd_expand_args (target, icode, 1, exp,
+                                      SIMD_ARG_COPY_TO_REG,
+                                      SIMD_ARG_STOP);
+
+    case AARCH64_SIMD_BINOP:
+      {
+        rtx arg2 = expand_normal (CALL_EXPR_ARG (exp, 1));
+        /* Handle constants only if the predicate allows it.  */
+       bool op1_const_int_p =
+         (CONST_INT_P (arg2)
+          && (*insn_data[icode].operand[2].predicate)
+               (arg2, insn_data[icode].operand[2].mode));
+       return aarch64_simd_expand_args
+         (target, icode, 1, exp,
+          SIMD_ARG_COPY_TO_REG,
+          op1_const_int_p ? SIMD_ARG_CONSTANT : SIMD_ARG_COPY_TO_REG,
+          SIMD_ARG_STOP);
+      }
+
+    case AARCH64_SIMD_TERNOP:
+      return aarch64_simd_expand_args (target, icode, 1, exp,
+                                      SIMD_ARG_COPY_TO_REG,
+                                      SIMD_ARG_COPY_TO_REG,
+                                      SIMD_ARG_COPY_TO_REG,
+                                      SIMD_ARG_STOP);
+
+    case AARCH64_SIMD_QUADOP:
+      return aarch64_simd_expand_args (target, icode, 1, exp,
+                                      SIMD_ARG_COPY_TO_REG,
+                                      SIMD_ARG_COPY_TO_REG,
+                                      SIMD_ARG_COPY_TO_REG,
+                                      SIMD_ARG_COPY_TO_REG,
+                                      SIMD_ARG_STOP);
+    case AARCH64_SIMD_LOAD1:
+    case AARCH64_SIMD_LOADSTRUCT:
+      return aarch64_simd_expand_args (target, icode, 1, exp,
+                                      SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
+
+    case AARCH64_SIMD_STORESTRUCT:
+      return aarch64_simd_expand_args (target, icode, 0, exp,
+                                      SIMD_ARG_COPY_TO_REG,
+                                      SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
+
+    case AARCH64_SIMD_REINTERP:
+      return aarch64_simd_expand_args (target, icode, 1, exp,
+                                      SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
+
+    case AARCH64_SIMD_CREATE:
+      return aarch64_simd_expand_args (target, icode, 1, exp,
+                                      SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
+
+    case AARCH64_SIMD_COMBINE:
+      return aarch64_simd_expand_args (target, icode, 1, exp,
+                                      SIMD_ARG_COPY_TO_REG,
+                                      SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
+
+    case AARCH64_SIMD_GETLANE:
+      return aarch64_simd_expand_args (target, icode, 1, exp,
+                                      SIMD_ARG_COPY_TO_REG,
+                                      SIMD_ARG_CONSTANT,
+                                      SIMD_ARG_STOP);
+
+    case AARCH64_SIMD_SETLANE:
+      return aarch64_simd_expand_args (target, icode, 1, exp,
+                                      SIMD_ARG_COPY_TO_REG,
+                                      SIMD_ARG_COPY_TO_REG,
+                                      SIMD_ARG_CONSTANT,
+                                      SIMD_ARG_STOP);
+
+    case AARCH64_SIMD_SHIFTIMM:
+      return aarch64_simd_expand_args (target, icode, 1, exp,
+                                      SIMD_ARG_COPY_TO_REG,
+                                      SIMD_ARG_CONSTANT,
+                                      SIMD_ARG_STOP);
+
+    case AARCH64_SIMD_SHIFTACC:
+    case AARCH64_SIMD_SHIFTINSERT:
+      return aarch64_simd_expand_args (target, icode, 1, exp,
+                                      SIMD_ARG_COPY_TO_REG,
+                                      SIMD_ARG_COPY_TO_REG,
+                                      SIMD_ARG_CONSTANT,
+                                      SIMD_ARG_STOP);
+
+    default:
+      gcc_unreachable ();
+    }
+}
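
As a rough illustration of how the pieces above fit together, a user-level call reaches one of these builtins through the "__builtin_aarch64_%s%s" name created by the init code and is then expanded by aarch64_simd_expand_builtin.  The sketch below assumes, purely for illustration, that the builtin table (in the separate .def file, not shown here) contains an "add" BINOP entry for V4SImode; the name __builtin_aarch64_addv4si is hypothetical.

/* Sketch only: the builtin name below is illustrative, not taken from
   the patch; in practice these builtins are reached via arm_neon.h.  */
typedef int v4si __attribute__ ((vector_size (16)));

v4si
add_v4si_sketch (v4si a, v4si b)
{
  /* Expanded through aarch64_simd_expand_builtin, AARCH64_SIMD_BINOP case.
     The cast covers any difference between the builtin's internal vector
     type and the generic vector type used here.  */
  return (v4si) __builtin_aarch64_addv4si (a, b);
}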
diff --git a/gcc/config/aarch64/aarch64-cores.def b/gcc/config/aarch64/aarch64-cores.def
new file mode 100644 (file)
index 0000000..06cc982
--- /dev/null
@@ -0,0 +1,38 @@
+/* Copyright (C) 2011, 2012 Free Software Foundation, Inc.
+   Contributed by ARM Ltd.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with GCC; see the file COPYING3.  If not see
+   <http://www.gnu.org/licenses/>.  */
+
+/* This is a list of cores that implement AArch64.
+
+   Before using #include to read this file, define a macro:
+
+      AARCH64_CORE(CORE_NAME, CORE_IDENT, ARCH, FLAGS, COSTS)
+
+   The CORE_NAME is the name of the core, represented as a string constant.
+   The CORE_IDENT is the name of the core, represented as an identifier.
+   ARCH is the architecture revision implemented by the chip.
+   FLAGS are the bitwise-or of the traits that apply to that core.
+   This need not include flags implied by the architecture.
+   COSTS is the name of the rtx_costs routine to use.  */
+
+/* V8 Architecture Processors.
+   This list currently contains example CPUs that implement AArch64, and
+   therefore serves as a template for adding more CPUs in the future.  */
+
+AARCH64_CORE("example-1",            large,         8,  AARCH64_FL_FPSIMD,    generic)
+AARCH64_CORE("example-2",            small,         8,  AARCH64_FL_FPSIMD,    generic)
diff --git a/gcc/config/aarch64/aarch64-elf-raw.h b/gcc/config/aarch64/aarch64-elf-raw.h
new file mode 100644 (file)
index 0000000..d9ec53f
--- /dev/null
@@ -0,0 +1,32 @@
+/* Machine description for AArch64 architecture.
+   Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+   Contributed by ARM Ltd.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with GCC; see the file COPYING3.  If not see
+   <http://www.gnu.org/licenses/>.  */
+
+/* Support for bare-metal builds.  */
+#ifndef GCC_AARCH64_ELF_RAW_H
+#define GCC_AARCH64_ELF_RAW_H
+
+#define STARTFILE_SPEC " crti%O%s crtbegin%O%s crt0%O%s"
+#define ENDFILE_SPEC " crtend%O%s crtn%O%s"
+
+#ifndef LINK_SPEC
+#define LINK_SPEC "%{mbig-endian:-EB} %{mlittle-endian:-EL} -X"
+#endif
+
+#endif /* GCC_AARCH64_ELF_RAW_H */
diff --git a/gcc/config/aarch64/aarch64-elf.h b/gcc/config/aarch64/aarch64-elf.h
new file mode 100644 (file)
index 0000000..1c021d0
--- /dev/null
@@ -0,0 +1,132 @@
+/* Machine description for AArch64 architecture.
+   Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+   Contributed by ARM Ltd.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with GCC; see the file COPYING3.  If not see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef GCC_AARCH64_ELF_H
+#define GCC_AARCH64_ELF_H
+
+
+#define ASM_OUTPUT_LABELREF(FILE, NAME) \
+  aarch64_asm_output_labelref (FILE, NAME)
+
+#define ASM_OUTPUT_DEF(FILE, NAME1, NAME2)     \
+  do                                           \
+    {                                          \
+      assemble_name (FILE, NAME1);             \
+      fputs (" = ", FILE);                     \
+      assemble_name (FILE, NAME2);             \
+      fputc ('\n', FILE);                      \
+    } while (0)
+
+#define TEXT_SECTION_ASM_OP    "\t.text"
+#define DATA_SECTION_ASM_OP    "\t.data"
+#define BSS_SECTION_ASM_OP     "\t.bss"
+
+#define CTORS_SECTION_ASM_OP "\t.section\t.init_array,\"aw\",%init_array"
+#define DTORS_SECTION_ASM_OP "\t.section\t.fini_array,\"aw\",%fini_array"
+
+#undef INIT_SECTION_ASM_OP
+#undef FINI_SECTION_ASM_OP
+#define INIT_ARRAY_SECTION_ASM_OP CTORS_SECTION_ASM_OP
+#define FINI_ARRAY_SECTION_ASM_OP DTORS_SECTION_ASM_OP
+
+/* Since we use .init_array/.fini_array we don't need the markers at
+   the start and end of the ctors/dtors arrays.  */
+#define CTOR_LIST_BEGIN asm (CTORS_SECTION_ASM_OP)
+#define CTOR_LIST_END          /* empty */
+#define DTOR_LIST_BEGIN asm (DTORS_SECTION_ASM_OP)
+#define DTOR_LIST_END          /* empty */
+
+#undef TARGET_ASM_CONSTRUCTOR
+#define TARGET_ASM_CONSTRUCTOR aarch64_elf_asm_constructor
+
+#undef TARGET_ASM_DESTRUCTOR
+#define TARGET_ASM_DESTRUCTOR aarch64_elf_asm_destructor
+
+#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
+/* Support for -falign-* switches.  Use .p2align to ensure that code
+   sections are padded with NOP instructions, rather than zeros.  */
+#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE, LOG, MAX_SKIP)         \
+  do                                                           \
+    {                                                          \
+      if ((LOG) != 0)                                          \
+       {                                                       \
+         if ((MAX_SKIP) == 0)                                  \
+           fprintf ((FILE), "\t.p2align %d\n", (int) (LOG));   \
+         else                                                  \
+           fprintf ((FILE), "\t.p2align %d,,%d\n",             \
+                    (int) (LOG), (int) (MAX_SKIP));            \
+       }                                                       \
+    } while (0)
+
+#endif /* HAVE_GAS_MAX_SKIP_P2ALIGN */
+
+#define JUMP_TABLES_IN_TEXT_SECTION 0
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL)             \
+  do {                                                                 \
+    switch (GET_MODE (BODY))                                           \
+      {                                                                        \
+      case QImode:                                                     \
+       asm_fprintf (STREAM, "\t.byte\t(%LL%d - %LLrtx%d) / 4\n",       \
+                    VALUE, REL);                                       \
+       break;                                                          \
+      case HImode:                                                     \
+       asm_fprintf (STREAM, "\t.2byte\t(%LL%d - %LLrtx%d) / 4\n",      \
+                    VALUE, REL);                                       \
+       break;                                                          \
+      case SImode:                                                     \
+      case DImode: /* See comment in aarch64_output_casesi.  */                \
+       asm_fprintf (STREAM, "\t.word\t(%LL%d - %LLrtx%d) / 4\n",       \
+                    VALUE, REL);                                       \
+       break;                                                          \
+      default:                                                         \
+       gcc_unreachable ();                                             \
+      }                                                                        \
+  } while (0)
+
+#define ASM_OUTPUT_ALIGN(STREAM, POWER)                \
+  fprintf(STREAM, "\t.align\t%d\n", (int)POWER)
+
+#define ASM_COMMENT_START "//"
+
+#define REGISTER_PREFIX                ""
+#define LOCAL_LABEL_PREFIX     "."
+#define USER_LABEL_PREFIX      ""
+
+#define GLOBAL_ASM_OP "\t.global\t"
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "\
+%{mbig-endian:-EB} \
+%{mlittle-endian:-EL} \
+%{mcpu=*:-mcpu=%*} \
+%{march=*:-march=%*}"
+#endif
+
+#undef TYPE_OPERAND_FMT
+#define TYPE_OPERAND_FMT       "%%%s"
+
+#undef TARGET_ASM_NAMED_SECTION
+#define TARGET_ASM_NAMED_SECTION  aarch64_elf_asm_named_section
+
+/* Stabs debug not required.  */
+#undef DBX_DEBUGGING_INFO
+
+#endif /* GCC_AARCH64_ELF_H */
diff --git a/gcc/config/aarch64/aarch64-generic.md b/gcc/config/aarch64/aarch64-generic.md
new file mode 100644 (file)
index 0000000..4c9e455
--- /dev/null
@@ -0,0 +1,38 @@
+;; Machine description for AArch64 architecture.
+;; Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Generic scheduler
+
+(define_automaton "aarch64")
+
+(define_cpu_unit "core" "aarch64")
+
+(define_attr "is_load" "yes,no"
+  (if_then_else (eq_attr "v8type" "fpsimd_load,fpsimd_load2,load1,load2")
+       (const_string "yes")
+       (const_string "no")))
+
+(define_insn_reservation "load" 2
+  (eq_attr "is_load" "yes")
+  "core")
+
+(define_insn_reservation "nonload" 1
+  (eq_attr "is_load" "no")
+  "core")
diff --git a/gcc/config/aarch64/aarch64-linux.h b/gcc/config/aarch64/aarch64-linux.h
new file mode 100644 (file)
index 0000000..95aaafa
--- /dev/null
@@ -0,0 +1,44 @@
+/* Machine description for AArch64 architecture.
+   Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+   Contributed by ARM Ltd.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with GCC; see the file COPYING3.  If not see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef GCC_AARCH64_LINUX_H
+#define GCC_AARCH64_LINUX_H
+
+#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-aarch64.so.1"
+
+#define LINUX_TARGET_LINK_SPEC  "%{h*}         \
+   %{static:-Bstatic}                          \
+   %{shared:-shared}                           \
+   %{symbolic:-Bsymbolic}                      \
+   %{rdynamic:-export-dynamic}                 \
+   -dynamic-linker " GNU_USER_DYNAMIC_LINKER " \
+   -X                                          \
+   %{mbig-endian:-EB} %{mlittle-endian:-EL}"
+
+#define LINK_SPEC LINUX_TARGET_LINK_SPEC
+
+#define TARGET_OS_CPP_BUILTINS()               \
+  do                                           \
+    {                                          \
+       GNU_USER_TARGET_OS_CPP_BUILTINS();      \
+    }                                          \
+  while (0)
+
+#endif  /* GCC_AARCH64_LINUX_H */
diff --git a/gcc/config/aarch64/aarch64-modes.def b/gcc/config/aarch64/aarch64-modes.def
new file mode 100644 (file)
index 0000000..ac05881
--- /dev/null
@@ -0,0 +1,54 @@
+/* Machine description for AArch64 architecture.
+   Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+   Contributed by ARM Ltd.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with GCC; see the file COPYING3.  If not see
+   <http://www.gnu.org/licenses/>.  */
+
+CC_MODE (CCFP);
+CC_MODE (CCFPE);
+CC_MODE (CC_SWP);
+CC_MODE (CC_ZESWP); /* zero-extend LHS (but swap to make it RHS).  */
+CC_MODE (CC_SESWP); /* sign-extend LHS (but swap to make it RHS).  */
+CC_MODE (CC_NZ);    /* Only N and Z bits of condition flags are valid.  */
+
+/* Vector modes.  */
+VECTOR_MODES (INT, 8);        /*       V8QI V4HI V2SI.  */
+VECTOR_MODES (INT, 16);       /* V16QI V8HI V4SI V2DI.  */
+VECTOR_MODES (FLOAT, 8);      /*                 V2SF.  */
+VECTOR_MODES (FLOAT, 16);     /*            V4SF V2DF.  */
+
+/* Oct Int: 256-bit integer mode needed for 32-byte vector arguments.  */
+INT_MODE (OI, 32);
+
+/* Opaque integer modes for 3, 6 or 8 Neon double registers (2 is
+   TImode).  */
+INT_MODE (EI, 24);
+INT_MODE (CI, 48);
+INT_MODE (XI, 64);
+
+/* Vector modes for register lists.  */
+VECTOR_MODES (INT, 32);                /* V32QI V16HI V8SI V4DI.  */
+VECTOR_MODES (FLOAT, 32);      /* V8SF V4DF.  */
+
+VECTOR_MODES (INT, 48);                /* V48QI V24HI V12SI V6DI.  */
+VECTOR_MODES (FLOAT, 48);      /* V12SF V6DF.  */
+
+VECTOR_MODES (INT, 64);                /* V64QI V32HI V16SI V8DI.  */
+VECTOR_MODES (FLOAT, 64);      /* V16SF V8DF.  */
+
+/* Quad float: 128-bit floating mode for long doubles.  */
+FLOAT_MODE (TF, 16, ieee_quad_format);
diff --git a/gcc/config/aarch64/aarch64-option-extensions.def b/gcc/config/aarch64/aarch64-option-extensions.def
new file mode 100644 (file)
index 0000000..a5d298a
--- /dev/null
@@ -0,0 +1,37 @@
+/* Copyright (C) 2012 Free Software Foundation, Inc.
+   Contributed by ARM Ltd.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with GCC; see the file COPYING3.  If not see
+   <http://www.gnu.org/licenses/>.  */
+
+/* This is a list of ISA extensions in AArch64.
+
+   Before using #include to read this file, define a macro:
+
+      AARCH64_OPT_EXTENSION(EXT_NAME, FLAGS_ON, FLAGS_OFF)
+
+   EXT_NAME is the name of the extension, represented as a string constant.
+   FLAGS_ON are the bitwise-or of the features that the extension adds.
+   FLAGS_OFF are the bitwise-or of the features that the extension removes.  */
+
+/* V8 Architecture Extensions.
+   This list currently contains example extensions for CPUs that implement
+   AArch64, and therefore serves as a template for adding more extensions in
+   the future.  */
+
+AARCH64_OPT_EXTENSION("fp",    AARCH64_FL_FP,  AARCH64_FL_FPSIMD | AARCH64_FL_CRYPTO)
+AARCH64_OPT_EXTENSION("simd",  AARCH64_FL_FPSIMD,      AARCH64_FL_SIMD | AARCH64_FL_CRYPTO)
+AARCH64_OPT_EXTENSION("crypto",        AARCH64_FL_CRYPTO | AARCH64_FL_FPSIMD,  AARCH64_FL_CRYPTO)
diff --git a/gcc/config/aarch64/aarch64-opts.h b/gcc/config/aarch64/aarch64-opts.h
new file mode 100644 (file)
index 0000000..6d7a2fd
--- /dev/null
@@ -0,0 +1,64 @@
+/* Copyright (C) 2011, 2012 Free Software Foundation, Inc.
+   Contributed by ARM Ltd.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published
+   by the Free Software Foundation; either version 3, or (at your
+   option) any later version.
+
+   GCC is distributed in the hope that it will be useful, but WITHOUT
+   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+   License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with GCC; see the file COPYING3.  If not see
+   <http://www.gnu.org/licenses/>.  */
+
+/* Definitions for option handling for AArch64.  */
+
+#ifndef GCC_AARCH64_OPTS_H
+#define GCC_AARCH64_OPTS_H
+
+/* The various cores that implement AArch64.  */
+enum aarch64_processor
+{
+#define AARCH64_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
+  IDENT,
+#include "aarch64-cores.def"
+#undef AARCH64_CORE
+  /* Used to indicate that no processor has been specified.  */
+  generic,
+  /* Used to mark the end of the processor table.  */
+  aarch64_none
+};
+
+/* TLS types.  */
+enum aarch64_tls_type {
+  TLS_TRADITIONAL,
+  TLS_DESCRIPTORS
+};
+
+/* The code model defines the address generation strategy.
+   Most have a PIC and non-PIC variant.  */
+enum aarch64_code_model {
+  /* Static code and data fit within a 1MB region.
+     Not fully implemented, mostly treated as SMALL.  */
+  AARCH64_CMODEL_TINY,
+  /* Static code, data and GOT/PLT fit within a 1MB region.
+     Not fully implemented, mostly treated as SMALL_PIC.  */
+  AARCH64_CMODEL_TINY_PIC,
+  /* Static code and data fit within a 4GB region.
+     The default non-PIC code model.  */
+  AARCH64_CMODEL_SMALL,
+  /* Static code, data and GOT/PLT fit within a 4GB region.
+     The default PIC code model.  */
+  AARCH64_CMODEL_SMALL_PIC,
+  /* No assumptions about addresses of code and data.
+     The PIC variant is not yet implemented.  */
+  AARCH64_CMODEL_LARGE
+};
+
+#endif
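
The mapping from -mcmodel= option strings to these enumerators is handled by the option machinery elsewhere in the patch; the helper below is only a hypothetical sketch of that mapping, with the PIC variants assumed to be chosen later once flag_pic is known.

#include <string.h>

/* Hypothetical sketch; the real mapping is defined by the option files.  */
static enum aarch64_code_model
cmodel_from_string_sketch (const char *s)
{
  if (strcmp (s, "tiny") == 0)
    return AARCH64_CMODEL_TINY;
  if (strcmp (s, "large") == 0)
    return AARCH64_CMODEL_LARGE;
  return AARCH64_CMODEL_SMALL;   /* Documented above as the default.  */
}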
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
new file mode 100644 (file)
index 0000000..765d192
--- /dev/null
@@ -0,0 +1,260 @@
+/* Machine description for AArch64 architecture.
+   Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+   Contributed by ARM Ltd.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with GCC; see the file COPYING3.  If not see
+   <http://www.gnu.org/licenses/>.  */
+
+
+#ifndef GCC_AARCH64_PROTOS_H
+#define GCC_AARCH64_PROTOS_H
+
+ /* This generator struct and enum are used to wrap a function pointer
+    to a function that generates an RTX fragment but takes either 3 or
+    4 operands.
+
+    The omn flavour wraps a function that generates a synchronization
+    instruction from 3 operands: old value, memory and new value.
+
+    The omrn flavour wraps a function that generates a synchronization
+    instruction from 4 operands: old value, memory, required value and
+    new value.  */
+
+enum aarch64_sync_generator_tag
+{
+  aarch64_sync_generator_omn,
+  aarch64_sync_generator_omrn
+};
+
+ /* Wrapper to pass around a polymorphic pointer to a sync instruction
+    generator together with the tag identifying which flavour it is.  */
+struct aarch64_sync_generator
+{
+  enum aarch64_sync_generator_tag op;
+  union
+  {
+    rtx (*omn) (rtx, rtx, rtx);
+    rtx (*omrn) (rtx, rtx, rtx, rtx);
+  } u;
+};
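
A minimal sketch of how this tagged union is meant to be dispatched, assuming GCC's rtx type and a caller that already holds the operands: the tag selects the three-operand or four-operand generator.  The helper name is hypothetical; the real callers are in aarch64.c.

/* Illustrative dispatch over the tagged union above.  */
static rtx
call_sync_generator_sketch (const struct aarch64_sync_generator *gen,
                            rtx old_value, rtx mem, rtx required, rtx new_value)
{
  if (gen->op == aarch64_sync_generator_omn)
    return gen->u.omn (old_value, mem, new_value);
  return gen->u.omrn (old_value, mem, required, new_value);
}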
+
+/*
+  SYMBOL_CONTEXT_ADR
+  The symbol is used in a load-address operation.
+  SYMBOL_CONTEXT_MEM
+  The symbol is used as the address in a MEM.
+ */
+enum aarch64_symbol_context
+{
+  SYMBOL_CONTEXT_MEM,
+  SYMBOL_CONTEXT_ADR
+};
+
+/* SYMBOL_SMALL_ABSOLUTE: Generate symbol accesses through
+   high and lo relocs that calculate the base address using a PC
+   relative reloc.
+   So to get the address of foo, we generate
+   adrp x0, foo
+   add  x0, x0, :lo12:foo
+
+   To load or store something to foo, we could use the corresponding
+   load store variants that generate an
+   ldr x0, [x0,:lo12:foo]
+   or
+   str x1, [x0, :lo12:foo]
+
+   This corresponds to the small code model of the compiler.
+
+   SYMBOL_SMALL_GOT: Similar to the one above but this
+   gives us the GOT entry of the symbol being referred to :
+   Thus calculating the GOT entry for foo is done using the
+   following sequence of instructions.  The ADRP instruction
+   gets us to the page containing the GOT entry of the symbol
+   and the got_lo12 gets us the actual offset in it.
+
+   adrp  x0, :got:foo
+   ldr   x0, [x0, :gotoff_lo12:foo]
+
+   This corresponds to the small PIC model of the compiler.
+
+   SYMBOL_SMALL_TLSGD
+   SYMBOL_SMALL_TLSDESC
+   SYMBOL_SMALL_GOTTPREL
+   SYMBOL_SMALL_TPREL
+   Each of these represents a thread-local symbol, and corresponds to the
+   thread local storage relocation operator for the symbol being referred to.
+
+   SYMBOL_FORCE_TO_MEM : Global variables are addressed through the
+   constant pool.  All variable addresses are spilled into constant
+   pools.  The constant pools themselves are addressed using PC
+   relative accesses.  This only works for the large code model.
+ */
+enum aarch64_symbol_type
+{
+  SYMBOL_SMALL_ABSOLUTE,
+  SYMBOL_SMALL_GOT,
+  SYMBOL_SMALL_TLSGD,
+  SYMBOL_SMALL_TLSDESC,
+  SYMBOL_SMALL_GOTTPREL,
+  SYMBOL_SMALL_TPREL,
+  SYMBOL_FORCE_TO_MEM
+};
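
Read together with the comment above, the classification of an ordinary global roughly follows the code model and PIC setting.  The helper below is a simplified, hypothetical sketch; the real logic is aarch64_classify_symbol in aarch64.c and also covers TLS symbols and the tiny model.

/* Simplified sketch, not the real classifier.  */
static enum aarch64_symbol_type
classify_global_sketch (enum aarch64_code_model model, int pic)
{
  if (model == AARCH64_CMODEL_LARGE)
    return SYMBOL_FORCE_TO_MEM;          /* Spill the address to the pool.  */
  return pic ? SYMBOL_SMALL_GOT          /* adrp + GOT load.  */
             : SYMBOL_SMALL_ABSOLUTE;    /* adrp + add :lo12:.  */
}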
+
+/* A set of tuning parameters contains references to size and time
+   cost models and vectors for address cost calculations, register
+   move costs and memory move costs.  */
+
+/* Extra costs for specific insns.  Only records the cost above a
+   single insn.  */
+
+struct cpu_rtx_cost_table
+{
+  const int memory_load;
+  const int memory_store;
+  const int register_shift;
+  const int int_divide;
+  const int float_divide;
+  const int double_divide;
+  const int int_multiply;
+  const int int_multiply_extend;
+  const int int_multiply_add;
+  const int int_multiply_extend_add;
+  const int float_multiply;
+  const int double_multiply;
+};
+
+/* Additional cost for addresses.  */
+struct cpu_addrcost_table
+{
+  const int pre_modify;
+  const int post_modify;
+  const int register_offset;
+  const int register_extend;
+  const int imm_offset;
+};
+
+/* Additional costs for register copies.  Cost is for one register.  */
+struct cpu_regmove_cost
+{
+  const int GP2GP;
+  const int GP2FP;
+  const int FP2GP;
+  const int FP2FP;
+};
+
+struct tune_params
+{
+  const struct cpu_rtx_cost_table *const insn_extra_cost;
+  const struct cpu_addrcost_table *const addr_cost;
+  const struct cpu_regmove_cost *const regmove_cost;
+  const int memmov_cost;
+};
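
To show how these structures fit together, here is a hypothetical tuning entry of the kind the COSTS field of AARCH64_CORE would ultimately name.  None of the numbers below come from the patch; they only illustrate the shape of the tables.

/* Entirely illustrative cost values.  */
static const struct cpu_rtx_cost_table example_extra_costs =
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };  /* All insns at baseline cost.  */

static const struct cpu_addrcost_table example_addrcost_table =
  { 0, 0, 0, 0, 0 };                       /* No extra addressing cost.  */

static const struct cpu_regmove_cost example_regmove_cost =
  { 1 /* GP2GP */, 2 /* GP2FP */, 2 /* FP2GP */, 1 /* FP2FP */ };

static const struct tune_params example_tunings =
{
  &example_extra_costs,
  &example_addrcost_table,
  &example_regmove_cost,
  4 /* memmov_cost */
};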
+
+HOST_WIDE_INT aarch64_initial_elimination_offset (unsigned, unsigned);
+bool aarch64_bitmask_imm (HOST_WIDE_INT val, enum machine_mode);
+bool aarch64_const_double_zero_rtx_p (rtx);
+bool aarch64_constant_address_p (rtx);
+bool aarch64_function_arg_regno_p (unsigned);
+bool aarch64_gen_movmemqi (rtx *);
+bool aarch64_is_extend_from_extract (enum machine_mode, rtx, rtx);
+bool aarch64_is_long_call_p (rtx);
+bool aarch64_label_mentioned_p (rtx);
+bool aarch64_legitimate_pic_operand_p (rtx);
+bool aarch64_move_imm (HOST_WIDE_INT, enum machine_mode);
+bool aarch64_pad_arg_upward (enum machine_mode, const_tree);
+bool aarch64_pad_reg_upward (enum machine_mode, const_tree, bool);
+bool aarch64_regno_ok_for_base_p (int, bool);
+bool aarch64_regno_ok_for_index_p (int, bool);
+bool aarch64_simd_imm_scalar_p (rtx x, enum machine_mode mode);
+bool aarch64_simd_imm_zero_p (rtx, enum machine_mode);
+bool aarch64_simd_shift_imm_p (rtx, enum machine_mode, bool);
+bool aarch64_symbolic_address_p (rtx);
+bool aarch64_symbolic_constant_p (rtx, enum aarch64_symbol_context,
+                                 enum aarch64_symbol_type *);
+bool aarch64_uimm12_shift (HOST_WIDE_INT);
+const char *aarch64_output_casesi (rtx *);
+const char *aarch64_output_sync_insn (rtx, rtx *);
+const char *aarch64_output_sync_lock_release (rtx, rtx);
+enum aarch64_symbol_type aarch64_classify_symbol (rtx,
+                                                 enum aarch64_symbol_context);
+enum aarch64_symbol_type aarch64_classify_tls_symbol (rtx);
+enum reg_class aarch64_regno_regclass (unsigned);
+int aarch64_asm_preferred_eh_data_format (int, int);
+int aarch64_hard_regno_mode_ok (unsigned, enum machine_mode);
+int aarch64_hard_regno_nregs (unsigned, enum machine_mode);
+int aarch64_simd_attr_length_move (rtx);
+int aarch64_simd_immediate_valid_for_move (rtx, enum machine_mode, rtx *,
+                                          int *, unsigned char *, int *,
+                                          int *);
+int aarch64_uxt_size (int, HOST_WIDE_INT);
+rtx aarch64_final_eh_return_addr (void);
+rtx aarch64_legitimize_reload_address (rtx *, enum machine_mode, int, int, int);
+const char *aarch64_output_move_struct (rtx *operands);
+rtx aarch64_return_addr (int, rtx);
+rtx aarch64_simd_gen_const_vector_dup (enum machine_mode, int);
+bool aarch64_simd_mem_operand_p (rtx);
+rtx aarch64_simd_vect_par_cnst_half (enum machine_mode, bool);
+rtx aarch64_tls_get_addr (void);
+unsigned aarch64_dbx_register_number (unsigned);
+unsigned aarch64_trampoline_size (void);
+unsigned aarch64_sync_loop_insns (rtx, rtx *);
+void aarch64_asm_output_labelref (FILE *, const char *);
+void aarch64_elf_asm_named_section (const char *, unsigned, tree);
+void aarch64_expand_epilogue (bool);
+void aarch64_expand_mov_immediate (rtx, rtx);
+void aarch64_expand_prologue (void);
+void aarch64_expand_sync (enum machine_mode, struct aarch64_sync_generator *,
+                         rtx, rtx, rtx, rtx);
+void aarch64_function_profiler (FILE *, int);
+void aarch64_init_cumulative_args (CUMULATIVE_ARGS *, const_tree, rtx,
+                                  const_tree, unsigned);
+void aarch64_init_expanders (void);
+void aarch64_print_operand (FILE *, rtx, char);
+void aarch64_print_operand_address (FILE *, rtx);
+
+/* Initialize builtins for SIMD intrinsics.  */
+void init_aarch64_simd_builtins (void);
+
+void aarch64_simd_const_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
+void aarch64_simd_disambiguate_copy (rtx *, rtx *, rtx *, unsigned int);
+
+/* Emit code to place an AdvSIMD pair result in memory locations (with equal
+   registers).  */
+void aarch64_simd_emit_pair_result_insn (enum machine_mode,
+                                        rtx (*intfn) (rtx, rtx, rtx), rtx,
+                                        rtx);
+
+/* Expand builtins for SIMD intrinsics.  */
+rtx aarch64_simd_expand_builtin (int, tree, rtx);
+
+void aarch64_simd_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
+
+/* Emit code for reinterprets.  */
+void aarch64_simd_reinterpret (rtx, rtx);
+
+void aarch64_split_128bit_move (rtx, rtx);
+
+bool aarch64_split_128bit_move_p (rtx, rtx);
+
+#if defined (RTX_CODE)
+
+bool aarch64_legitimate_address_p (enum machine_mode, rtx, RTX_CODE, bool);
+enum machine_mode aarch64_select_cc_mode (RTX_CODE, rtx, rtx);
+rtx aarch64_gen_compare_reg (RTX_CODE, rtx, rtx);
+
+#endif /* RTX_CODE */
+
+#endif /* GCC_AARCH64_PROTOS_H */
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
new file mode 100644 (file)
index 0000000..a7ddfb1
--- /dev/null
@@ -0,0 +1,3264 @@
+;; Machine description for AArch64 AdvSIMD architecture.
+;; Copyright (C) 2011, 2012 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+; Main data types used by the instructions
+
+(define_attr "simd_mode" "unknown,none,V8QI,V16QI,V4HI,V8HI,V2SI,V4SI,V2DI,V2SF,V4SF,V2DF,OI,CI,XI,DI,DF,SI,HI,QI"
+  (const_string "unknown"))
+
+
+; Classification of AdvSIMD instructions for scheduling purposes.
+; Do not set this attribute and the "v8type" attribute together in
+; any instruction pattern.
+
+; simd_abd              integer absolute difference and accumulate.
+; simd_abdl             integer absolute difference and accumulate (long).
+; simd_adal             integer add and accumulate (long).
+; simd_add              integer addition/subtraction.
+; simd_addl             integer addition/subtraction (long).
+; simd_addlv            across lanes integer sum (long).
+; simd_addn             integer addition/subtraction (narrow).
+; simd_addn2            integer addition/subtraction (narrow, high).
+; simd_addv             across lanes integer sum.
+; simd_cls              count leading sign/zero bits.
+; simd_cmp              compare / create mask.
+; simd_cnt              population count.
+; simd_dup              duplicate element.
+; simd_dupgp            duplicate general purpose register.
+; simd_ext              bitwise extract from pair.
+; simd_fadd             floating point add/sub.
+; simd_fcmp             floating point compare.
+; simd_fcvti            floating point convert to integer.
+; simd_fcvtl            floating-point convert upsize.
+; simd_fcvtn            floating-point convert downsize (narrow).
+; simd_fcvtn2           floating-point convert downsize (narrow, high).
+; simd_fdiv             floating point division.
+; simd_fminmax          floating point min/max.
+; simd_fminmaxv         across lanes floating point min/max.
+; simd_fmla             floating point multiply-add.
+; simd_fmla_elt         floating point multiply-add (by element).
+; simd_fmul             floating point multiply.
+; simd_fmul_elt         floating point multiply (by element).
+; simd_fnegabs          floating point neg/abs.
+; simd_frcpe            floating point reciprocal estimate.
+; simd_frcps            floating point reciprocal step.
+; simd_frecx            floating point reciprocal exponent.
+; simd_frint            floating point round to integer.
+; simd_fsqrt            floating point square root.
+; simd_icvtf            integer convert to floating point.
+; simd_ins              insert element.
+; simd_insgp            insert general purpose register.
+; simd_load1            load multiple structures to one register (LD1).
+; simd_load1r           load single structure to all lanes of one register (LD1R).
+; simd_load1s           load single structure to one lane of one register (LD1 [index]).
+; simd_load2            load multiple structures to two registers (LD1, LD2).
+; simd_load2r           load single structure to all lanes of two registers (LD1R, LD2R).
+; simd_load2s           load single structure to one lane of two registers (LD2 [index]).
+; simd_load3            load multiple structures to three registers (LD1, LD3).
+; simd_load3r           load single structure to all lanes of three registers (LD3R).
+; simd_load3s           load single structure to one lane of three registers (LD3 [index]).
+; simd_load4            load multiple structures to four registers (LD1, LD2, LD4).
+; simd_load4r           load single structure to all lanes of four registers (LD4R).
+; simd_load4s           load single structure to one lane of four registers (LD4 [index]).
+; simd_logic            logical operation.
+; simd_logic_imm        logical operation (immediate).
+; simd_minmax           integer min/max.
+; simd_minmaxv          across lanes integer min/max.
+; simd_mla              integer multiply-accumulate.
+; simd_mla_elt          integer multiply-accumulate (by element).
+; simd_mlal             integer multiply-accumulate (long).
+; simd_mlal_elt         integer multiply-accumulate (by element, long).
+; simd_move             move register.
+; simd_move_imm         move immediate.
+; simd_movgp            move element to general purpose register.
+; simd_mul              integer multiply.
+; simd_mul_elt          integer multiply (by element).
+; simd_mull             integer multiply (long).
+; simd_mull_elt         integer multiply (by element, long).
+; simd_negabs           integer negate/absolute.
+; simd_rbit             bitwise reverse.
+; simd_rcpe             integer reciprocal estimate.
+; simd_rcps             integer reciprocal square root.
+; simd_rev              element reverse.
+; simd_sat_add          integer saturating addition/subtraction.
+; simd_sat_mlal         integer saturating multiply-accumulate (long).
+; simd_sat_mlal_elt     integer saturating multiply-accumulate (by element, long).
+; simd_sat_mul          integer saturating multiply.
+; simd_sat_mul_elt      integer saturating multiply (by element).
+; simd_sat_mull         integer saturating multiply (long).
+; simd_sat_mull_elt     integer saturating multiply (by element, long).
+; simd_sat_negabs       integer saturating negate/absolute.
+; simd_sat_shift        integer saturating shift.
+; simd_sat_shift_imm    integer saturating shift (immediate).
+; simd_sat_shiftn_imm   integer saturating shift (narrow, immediate).
+; simd_sat_shiftn2_imm  integer saturating shift (narrow, high, immediate).
+; simd_shift            shift register/vector.
+; simd_shift_acc        shift accumulate.
+; simd_shift_imm        shift immediate.
+; simd_shift_imm_acc    shift immediate and accumulate.
+; simd_shiftl           shift register/vector (long).
+; simd_shiftl_imm       shift register/vector (long, immediate).
+; simd_shiftn_imm       shift register/vector (narrow, immediate).
+; simd_shiftn2_imm      shift register/vector (narrow, high, immediate).
+; simd_store1           store multiple structures from one register (ST1).
+; simd_store1s          store single structure from one lane of one register (ST1 [index]).
+; simd_store2           store multiple structures from two registers (ST1, ST2).
+; simd_store2s          store single structure from one lane of two registers (ST2 [index]).
+; simd_store3           store multiple structures from three registers (ST1, ST3).
+; simd_store3s          store single structure from one lane of three registers (ST3 [index]).
+; simd_store4           store multiple structures from four registers (ST1, ST2, ST4).
+; simd_store4s          store single structure from one lane of four registers (ST4 [index]).
+; simd_tbl              table lookup.
+; simd_trn              transpose.
+; simd_zip              zip/unzip.
+
+(define_attr "simd_type"
+   "simd_abd,\
+   simd_abdl,\
+   simd_adal,\
+   simd_add,\
+   simd_addl,\
+   simd_addlv,\
+   simd_addn,\
+   simd_addn2,\
+   simd_addv,\
+   simd_cls,\
+   simd_cmp,\
+   simd_cnt,\
+   simd_dup,\
+   simd_dupgp,\
+   simd_ext,\
+   simd_fadd,\
+   simd_fcmp,\
+   simd_fcvti,\
+   simd_fcvtl,\
+   simd_fcvtn,\
+   simd_fcvtn2,\
+   simd_fdiv,\
+   simd_fminmax,\
+   simd_fminmaxv,\
+   simd_fmla,\
+   simd_fmla_elt,\
+   simd_fmul,\
+   simd_fmul_elt,\
+   simd_fnegabs,\
+   simd_frcpe,\
+   simd_frcps,\
+   simd_frecx,\
+   simd_frint,\
+   simd_fsqrt,\
+   simd_icvtf,\
+   simd_ins,\
+   simd_insgp,\
+   simd_load1,\
+   simd_load1r,\
+   simd_load1s,\
+   simd_load2,\
+   simd_load2r,\
+   simd_load2s,\
+   simd_load3,\
+   simd_load3r,\
+   simd_load3s,\
+   simd_load4,\
+   simd_load4r,\
+   simd_load4s,\
+   simd_logic,\
+   simd_logic_imm,\
+   simd_minmax,\
+   simd_minmaxv,\
+   simd_mla,\
+   simd_mla_elt,\
+   simd_mlal,\
+   simd_mlal_elt,\
+   simd_movgp,\
+   simd_move,\
+   simd_move_imm,\
+   simd_mul,\
+   simd_mul_elt,\
+   simd_mull,\
+   simd_mull_elt,\
+   simd_negabs,\
+   simd_rbit,\
+   simd_rcpe,\
+   simd_rcps,\
+   simd_rev,\
+   simd_sat_add,\
+   simd_sat_mlal,\
+   simd_sat_mlal_elt,\
+   simd_sat_mul,\
+   simd_sat_mul_elt,\
+   simd_sat_mull,\
+   simd_sat_mull_elt,\
+   simd_sat_negabs,\
+   simd_sat_shift,\
+   simd_sat_shift_imm,\
+   simd_sat_shiftn_imm,\
+   simd_sat_shiftn2_imm,\
+   simd_shift,\
+   simd_shift_acc,\
+   simd_shift_imm,\
+   simd_shift_imm_acc,\
+   simd_shiftl,\
+   simd_shiftl_imm,\
+   simd_shiftn_imm,\
+   simd_shiftn2_imm,\
+   simd_store1,\
+   simd_store1s,\
+   simd_store2,\
+   simd_store2s,\
+   simd_store3,\
+   simd_store3s,\
+   simd_store4,\
+   simd_store4s,\
+   simd_tbl,\
+   simd_trn,\
+   simd_zip,\
+   none"
+  (const_string "none"))
+
+
+; The "neon_type" attribute is used by the AArch32 backend.  Below is a mapping
+; from "simd_type" to "neon_type".
+
+(define_attr "neon_type"
+   "neon_int_1,neon_int_2,neon_int_3,neon_int_4,neon_int_5,neon_vqneg_vqabs,
+   neon_vmov,neon_vaba,neon_vsma,neon_vaba_qqq,
+   neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,neon_mul_qqq_8_16_32_ddd_32,
+   neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar,
+   neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,neon_mla_qqq_8_16,
+   neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long,
+   neon_mla_qqq_32_qqd_32_scalar,neon_mul_ddd_16_scalar_32_16_long_scalar,
+   neon_mul_qqd_32_scalar,neon_mla_ddd_16_scalar_qdd_32_16_long_scalar,
+   neon_shift_1,neon_shift_2,neon_shift_3,neon_vshl_ddd,
+   neon_vqshl_vrshl_vqrshl_qqq,neon_vsra_vrsra,neon_fp_vadd_ddd_vabs_dd,
+   neon_fp_vadd_qqq_vabs_qq,neon_fp_vsum,neon_fp_vmul_ddd,neon_fp_vmul_qqd,
+   neon_fp_vmla_ddd,neon_fp_vmla_qqq,neon_fp_vmla_ddd_scalar,
+   neon_fp_vmla_qqq_scalar,neon_fp_vrecps_vrsqrts_ddd,
+   neon_fp_vrecps_vrsqrts_qqq,neon_bp_simple,neon_bp_2cycle,neon_bp_3cycle,
+   neon_ldr,neon_str,neon_vld1_1_2_regs,neon_vld1_3_4_regs,
+   neon_vld2_2_regs_vld1_vld2_all_lanes,neon_vld2_4_regs,neon_vld3_vld4,
+   neon_vst1_1_2_regs_vst2_2_regs,neon_vst1_3_4_regs,
+   neon_vst2_4_regs_vst3_vst4,neon_vst3_vst4,neon_vld1_vld2_lane,
+   neon_vld3_vld4_lane,neon_vst1_vst2_lane,neon_vst3_vst4_lane,
+   neon_vld3_vld4_all_lanes,neon_mcr,neon_mcr_2_mcrr,neon_mrc,neon_mrrc,
+   neon_ldm_2,neon_stm_2,none,unknown"
+  (cond [
+         (eq_attr "simd_type" "simd_dup") (const_string "neon_bp_simple")
+         (eq_attr "simd_type" "simd_movgp") (const_string "neon_bp_simple")
+         (eq_attr "simd_type" "simd_add,simd_logic,simd_logic_imm") (const_string "neon_int_1")
+         (eq_attr "simd_type" "simd_negabs,simd_addlv") (const_string "neon_int_3")
+         (eq_attr "simd_type" "simd_addn,simd_addn2,simd_addl,simd_sat_add,simd_sat_negabs") (const_string "neon_int_4")
+         (eq_attr "simd_type" "simd_move") (const_string "neon_vmov")
+         (eq_attr "simd_type" "simd_ins") (const_string "neon_mcr")
+         (and (eq_attr "simd_type" "simd_mul,simd_sat_mul") (eq_attr "simd_mode" "V8QI,V4HI")) (const_string "neon_mul_ddd_8_16_qdd_16_8_long_32_16_long")
+         (and (eq_attr "simd_type" "simd_mul,simd_sat_mul") (eq_attr "simd_mode" "V2SI,V8QI,V16QI,V2SI")) (const_string "neon_mul_qqq_8_16_32_ddd_32")
+         (and (eq_attr "simd_type" "simd_mull,simd_sat_mull") (eq_attr "simd_mode" "V8QI,V16QI,V4HI,V8HI")) (const_string "neon_mul_ddd_8_16_qdd_16_8_long_32_16_long")
+         (and (eq_attr "simd_type" "simd_mull,simd_sat_mull") (eq_attr "simd_mode" "V2SI,V4SI,V2DI")) (const_string "neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar")
+         (and (eq_attr "simd_type" "simd_mla,simd_sat_mlal") (eq_attr "simd_mode" "V8QI,V4HI")) (const_string "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long")
+         (and (eq_attr "simd_type" "simd_mla,simd_sat_mlal") (eq_attr "simd_mode" "V2SI")) (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")
+         (and (eq_attr "simd_type" "simd_mla,simd_sat_mlal") (eq_attr "simd_mode" "V16QI,V8HI")) (const_string "neon_mla_qqq_8_16")
+         (and (eq_attr "simd_type" "simd_mla,simd_sat_mlal") (eq_attr "simd_mode" "V4SI")) (const_string "neon_mla_qqq_32_qqd_32_scalar")
+         (and (eq_attr "simd_type" "simd_mlal") (eq_attr "simd_mode" "V8QI,V16QI,V4HI,V8HI")) (const_string "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long")
+         (and (eq_attr "simd_type" "simd_mlal") (eq_attr "simd_mode" "V2SI,V4SI,V2DI")) (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")
+         (and (eq_attr "simd_type" "simd_fmla") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vmla_ddd")
+         (and (eq_attr "simd_type" "simd_fmla") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vmla_qqq")
+         (and (eq_attr "simd_type" "simd_fmla_elt") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vmla_ddd_scalar")
+         (and (eq_attr "simd_type" "simd_fmla_elt") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vmla_qqq_scalar")
+         (and (eq_attr "simd_type" "simd_fmul,simd_fmul_elt,simd_fdiv,simd_fsqrt") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vmul_ddd")
+         (and (eq_attr "simd_type" "simd_fmul,simd_fmul_elt,simd_fdiv,simd_fsqrt") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vmul_qqd")
+         (and (eq_attr "simd_type" "simd_fadd") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vadd_ddd_vabs_dd")
+         (and (eq_attr "simd_type" "simd_fadd") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vadd_qqq_vabs_qq")
+         (and (eq_attr "simd_type" "simd_fnegabs,simd_fminmax,simd_fminmaxv") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vadd_ddd_vabs_dd")
+         (and (eq_attr "simd_type" "simd_fnegabs,simd_fminmax,simd_fminmaxv") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vadd_qqq_vabs_qq")
+         (and (eq_attr "simd_type" "simd_shift,simd_shift_acc") (eq_attr "simd_mode" "V8QI,V4HI,V2SI")) (const_string "neon_vshl_ddd")
+         (and (eq_attr "simd_type" "simd_shift,simd_shift_acc") (eq_attr "simd_mode" "V16QI,V8HI,V4SI,V2DI")) (const_string "neon_shift_3")
+         (eq_attr "simd_type" "simd_minmax,simd_minmaxv") (const_string "neon_int_5")
+         (eq_attr "simd_type" "simd_shiftn_imm,simd_shiftn2_imm,simd_shiftl_imm") (const_string "neon_shift_1")
+         (eq_attr "simd_type" "simd_load1,simd_load2") (const_string "neon_vld1_1_2_regs")
+         (eq_attr "simd_type" "simd_load3,simd_load4") (const_string "neon_vld1_3_4_regs")
+         (eq_attr "simd_type" "simd_load1r,simd_load2r,simd_load3r,simd_load4r") (const_string "neon_vld2_2_regs_vld1_vld2_all_lanes")
+         (eq_attr "simd_type" "simd_load1s,simd_load2s") (const_string "neon_vld1_vld2_lane")
+         (eq_attr "simd_type" "simd_load3s,simd_load4s") (const_string "neon_vld3_vld4_lane")
+         (eq_attr "simd_type" "simd_store1,simd_store2") (const_string "neon_vst1_1_2_regs_vst2_2_regs")
+         (eq_attr "simd_type" "simd_store3,simd_store4") (const_string "neon_vst1_3_4_regs")
+         (eq_attr "simd_type" "simd_store1s,simd_store2s") (const_string "neon_vst1_vst2_lane")
+         (eq_attr "simd_type" "simd_store3s,simd_store4s") (const_string "neon_vst3_vst4_lane")
+         (and (eq_attr "simd_type" "simd_frcpe,simd_frcps") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vrecps_vrsqrts_ddd")
+         (and (eq_attr "simd_type" "simd_frcpe,simd_frcps") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vrecps_vrsqrts_qqq")
+         (eq_attr "simd_type" "none") (const_string "none")
+  ]
+  (const_string "unknown")))
+
+
+(define_expand "mov<mode>"
+  [(set (match_operand:VALL 0 "aarch64_simd_nonimmediate_operand" "")
+       (match_operand:VALL 1 "aarch64_simd_general_operand" ""))]
+  "TARGET_SIMD"
+  "
+    if (GET_CODE (operands[0]) == MEM)
+      operands[1] = force_reg (<MODE>mode, operands[1]);
+  "
+)
+
+(define_expand "movmisalign<mode>"
+  [(set (match_operand:VALL 0 "aarch64_simd_nonimmediate_operand" "")
+        (match_operand:VALL 1 "aarch64_simd_general_operand" ""))]
+  "TARGET_SIMD"
+{
+  /* This pattern is not permitted to fail during expansion: if both arguments
+     are non-registers (e.g. memory := constant, which can be created by the
+     auto-vectorizer), force operand 1 into a register.  */
+  if (!register_operand (operands[0], <MODE>mode)
+      && !register_operand (operands[1], <MODE>mode))
+    operands[1] = force_reg (<MODE>mode, operands[1]);
+})
+
+(define_insn "aarch64_simd_dup<mode>"
+  [(set (match_operand:VDQ 0 "register_operand" "=w")
+        (vec_duplicate:VDQ (match_operand:<VEL> 1 "register_operand" "r")))]
+  "TARGET_SIMD"
+  "dup\\t%0.<Vtype>, %<vw>1"
+  [(set_attr "simd_type" "simd_dupgp")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_dup_lane<mode>"
+  [(set (match_operand:VDQ_I 0 "register_operand" "=w")
+        (vec_duplicate:VDQ_I
+         (vec_select:<VEL>
+           (match_operand:<VCON> 1 "register_operand" "w")
+           (parallel [(match_operand:SI 2 "immediate_operand" "i")])
+          )))]
+  "TARGET_SIMD"
+  "dup\\t%<v>0<Vmtype>, %1.<Vetype>[%2]"
+  [(set_attr "simd_type" "simd_dup")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_dup_lane<mode>"
+  [(set (match_operand:SDQ_I 0 "register_operand" "=w")
+       (vec_select:<VEL>
+         (match_operand:<VCON> 1 "register_operand" "w")
+         (parallel [(match_operand:SI 2 "immediate_operand" "i")])
+        ))]
+  "TARGET_SIMD"
+  "dup\\t%<v>0<Vmtype>, %1.<Vetype>[%2]"
+  [(set_attr "simd_type" "simd_dup")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_simd_dup<mode>"
+  [(set (match_operand:VDQF 0 "register_operand" "=w")
+        (vec_duplicate:VDQF (match_operand:<VEL> 1 "register_operand" "w")))]
+  "TARGET_SIMD"
+  "dup\\t%0.<Vtype>, %1.<Vetype>[0]"
+  [(set_attr "simd_type" "simd_dup")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "*aarch64_simd_mov<mode>"
+  [(set (match_operand:VD 0 "aarch64_simd_nonimmediate_operand"
+               "=w, Utv,  w, ?r, ?w, ?r, w")
+       (match_operand:VD 1 "aarch64_simd_general_operand"
+               "Utv,  w,  w,  w,  r,  r, Dn"))]
+  "TARGET_SIMD
+   && (register_operand (operands[0], <MODE>mode)
+       || register_operand (operands[1], <MODE>mode))"
+{
+   switch (which_alternative)
+     {
+     case 0: return "ld1\t{%0.<Vtype>}, %1";
+     case 1: return "st1\t{%1.<Vtype>}, %0";
+     case 2: return "orr\t%0.<Vbtype>, %1.<Vbtype>, %1.<Vbtype>";
+     case 3: return "umov\t%0, %1.d[0]";
+     case 4: return "ins\t%0.d[0], %1";
+     case 5: return "mov\t%0, %1";
+     case 6:
+       {
+       int is_valid;
+       unsigned char widthc;
+       int width;
+       static char templ[40];
+       int shift = 0, mvn = 0;
+       const char *mnemonic;
+       int length = 0;
+
+       is_valid =
+         aarch64_simd_immediate_valid_for_move (operands[1], <MODE>mode,
+                                                &operands[1], &width, &widthc,
+                                                &mvn, &shift);
+       gcc_assert (is_valid != 0);
+
+       mnemonic = mvn ? "mvni" : "movi";
+       if (widthc != 'd')
+         length += snprintf (templ, sizeof (templ),
+                             "%s\t%%0.%d%c, %%1",
+                             mnemonic, 64 / width, widthc);
+       else
+         length += snprintf (templ, sizeof (templ), "%s\t%%d0, %%1", mnemonic);
+
+       if (shift != 0)
+         length += snprintf (templ + length, sizeof (templ) - length,
+                             ", lsl %d", shift);
+       return templ;
+       }
+     default: gcc_unreachable ();
+     }
+}
+  [(set_attr "simd_type" "simd_load1,simd_store1,simd_move,simd_movgp,simd_insgp,simd_move,simd_move_imm")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "*aarch64_simd_mov<mode>"
+  [(set (match_operand:VQ 0 "aarch64_simd_nonimmediate_operand"
+               "=w, Utv,  w, ?r, ?w, ?r, w")
+       (match_operand:VQ 1 "aarch64_simd_general_operand"
+               "Utv,  w,  w,  w,  r,  r, Dn"))]
+  "TARGET_SIMD
+   && (register_operand (operands[0], <MODE>mode)
+       || register_operand (operands[1], <MODE>mode))"
+{
+   switch (which_alternative)
+     {
+     case 0: return "ld1\t{%0.<Vtype>}, %1";
+     case 1: return "st1\t{%1.<Vtype>}, %0";
+     case 2: return "orr\t%0.<Vbtype>, %1.<Vbtype>, %1.<Vbtype>";
+     case 3: return "umov\t%0, %1.d[0]\;umov\t%H0, %1.d[1]";
+     case 4: return "ins\t%0.d[0], %1\;ins\t%0.d[1], %H1";
+     case 5: return "#";
+     case 6:
+       {
+       int is_valid;
+       unsigned char widthc;
+       int width;
+       static char templ[40];
+       int shift = 0, mvn = 0;
+
+       is_valid =
+         aarch64_simd_immediate_valid_for_move (operands[1], <MODE>mode,
+                                                &operands[1], &width, &widthc,
+                                                &mvn, &shift);
+       gcc_assert (is_valid != 0);
+       if (shift)
+         snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, %%1, lsl %d",
+                   mvn ? "mvni" : "movi",
+                   128 / width, widthc, shift);
+       else
+         snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, %%1",
+                   mvn ? "mvni" : "movi",
+                   128 / width, widthc);
+       return templ;
+       }
+     default: gcc_unreachable ();
+     }
+}
+  [(set_attr "simd_type" "simd_load1,simd_store1,simd_move,simd_movgp,simd_insgp,simd_move,simd_move_imm")
+   (set_attr "simd_mode" "<MODE>")
+   (set_attr "length" "4,4,4,8,8,8,4")]
+)
+
+(define_split
+  [(set (match_operand:VQ 0 "register_operand" "")
+      (match_operand:VQ 1 "register_operand" ""))]
+  "TARGET_SIMD && reload_completed
+   && GP_REGNUM_P (REGNO (operands[0]))
+   && GP_REGNUM_P (REGNO (operands[1]))"
+  [(set (match_dup 0) (match_dup 1))
+   (set (match_dup 2) (match_dup 3))]
+{
+  int rdest = REGNO (operands[0]);
+  int rsrc = REGNO (operands[1]);
+  rtx dest[2], src[2];
+
+  dest[0] = gen_rtx_REG (DImode, rdest);
+  src[0] = gen_rtx_REG (DImode, rsrc);
+  dest[1] = gen_rtx_REG (DImode, rdest + 1);
+  src[1] = gen_rtx_REG (DImode, rsrc + 1);
+
+  aarch64_simd_disambiguate_copy (operands, dest, src, 2);
+})
+
+(define_insn "orn<mode>3"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+       (ior:VDQ (not:VDQ (match_operand:VDQ 1 "register_operand" "w"))
+               (match_operand:VDQ 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "orn\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>"
+  [(set_attr "simd_type" "simd_logic")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "bic<mode>3"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+       (and:VDQ (not:VDQ (match_operand:VDQ 1 "register_operand" "w"))
+               (match_operand:VDQ 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "bic\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>"
+  [(set_attr "simd_type" "simd_logic")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "add<mode>3"
+  [(set (match_operand:VDQ 0 "register_operand" "=w")
+        (plus:VDQ (match_operand:VDQ 1 "register_operand" "w")
+                 (match_operand:VDQ 2 "register_operand" "w")))]
+  "TARGET_SIMD"
+  "add\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_add")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "sub<mode>3"
+  [(set (match_operand:VDQ 0 "register_operand" "=w")
+        (minus:VDQ (match_operand:VDQ 1 "register_operand" "w")
+                  (match_operand:VDQ 2 "register_operand" "w")))]
+  "TARGET_SIMD"
+  "sub\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_add")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "mul<mode>3"
+  [(set (match_operand:VDQM 0 "register_operand" "=w")
+        (mult:VDQM (match_operand:VDQM 1 "register_operand" "w")
+                  (match_operand:VDQM 2 "register_operand" "w")))]
+  "TARGET_SIMD"
+  "mul\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_mul")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "neg<mode>2"
+  [(set (match_operand:VDQM 0 "register_operand" "=w")
+        (neg:VDQM (match_operand:VDQM 1 "register_operand" "w")))]
+  "TARGET_SIMD"
+  "neg\t%0.<Vtype>, %1.<Vtype>"
+  [(set_attr "simd_type" "simd_negabs")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "abs<mode>2"
+  [(set (match_operand:VDQ 0 "register_operand" "=w")
+        (abs:VDQ (match_operand:VDQ 1 "register_operand" "w")))]
+  "TARGET_SIMD"
+  "abs\t%0.<Vtype>, %1.<Vtype>"
+  [(set_attr "simd_type" "simd_negabs")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "and<mode>3"
+  [(set (match_operand:VDQ 0 "register_operand" "=w")
+        (and:VDQ (match_operand:VDQ 1 "register_operand" "w")
+                (match_operand:VDQ 2 "register_operand" "w")))]
+  "TARGET_SIMD"
+  "and\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
+  [(set_attr "simd_type" "simd_logic")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "ior<mode>3"
+  [(set (match_operand:VDQ 0 "register_operand" "=w")
+        (ior:VDQ (match_operand:VDQ 1 "register_operand" "w")
+                (match_operand:VDQ 2 "register_operand" "w")))]
+  "TARGET_SIMD"
+  "orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
+  [(set_attr "simd_type" "simd_logic")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "xor<mode>3"
+  [(set (match_operand:VDQ 0 "register_operand" "=w")
+        (xor:VDQ (match_operand:VDQ 1 "register_operand" "w")
+                (match_operand:VDQ 2 "register_operand" "w")))]
+  "TARGET_SIMD"
+  "eor\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
+  [(set_attr "simd_type" "simd_logic")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "one_cmpl<mode>2"
+  [(set (match_operand:VDQ 0 "register_operand" "=w")
+        (not:VDQ (match_operand:VDQ 1 "register_operand" "w")))]
+  "TARGET_SIMD"
+  "not\t%0.<Vbtype>, %1.<Vbtype>"
+  [(set_attr "simd_type" "simd_logic")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_simd_vec_set<mode>"
+  [(set (match_operand:VQ_S 0 "register_operand" "=w")
+        (vec_merge:VQ_S
+           (vec_duplicate:VQ_S
+               (match_operand:<VEL> 1 "register_operand" "r"))
+           (match_operand:VQ_S 3 "register_operand" "0")
+           (match_operand:SI 2 "immediate_operand" "i")))]
+  "TARGET_SIMD"
+  "ins\t%0.<Vetype>[%p2], %w1";
+  [(set_attr "simd_type" "simd_insgp")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_simd_lshr<mode>"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+       (lshiftrt:VDQ (match_operand:VDQ 1 "register_operand" "w")
+                    (match_operand:VDQ  2 "aarch64_simd_rshift_imm" "Dr")))]
+ "TARGET_SIMD"
+ "ushr\t%0.<Vtype>, %1.<Vtype>, %2"
+  [(set_attr "simd_type" "simd_shift_imm")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_simd_ashr<mode>"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+       (ashiftrt:VDQ (match_operand:VDQ 1 "register_operand" "w")
+                    (match_operand:VDQ  2 "aarch64_simd_rshift_imm" "Dr")))]
+ "TARGET_SIMD"
+ "sshr\t%0.<Vtype>, %1.<Vtype>, %2"
+  [(set_attr "simd_type" "simd_shift_imm")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_simd_imm_shl<mode>"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+       (ashift:VDQ (match_operand:VDQ 1 "register_operand" "w")
+                  (match_operand:VDQ  2 "aarch64_simd_lshift_imm" "Dl")))]
+ "TARGET_SIMD"
+  "shl\t%0.<Vtype>, %1.<Vtype>, %2"
+  [(set_attr "simd_type" "simd_shift_imm")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_simd_reg_sshl<mode>"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+       (ashift:VDQ (match_operand:VDQ 1 "register_operand" "w")
+                  (match_operand:VDQ 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "sshl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_shift")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_simd_reg_shl<mode>_unsigned"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+       (unspec:VDQ [(match_operand:VDQ 1 "register_operand" "w")
+                   (match_operand:VDQ 2 "register_operand" "w")]
+                  UNSPEC_ASHIFT_UNSIGNED))]
+ "TARGET_SIMD"
+ "ushl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_shift")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_simd_reg_shl<mode>_signed"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+       (unspec:VDQ [(match_operand:VDQ 1 "register_operand" "w")
+                   (match_operand:VDQ 2 "register_operand" "w")]
+                  UNSPEC_ASHIFT_SIGNED))]
+ "TARGET_SIMD"
+ "sshl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_shift")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "ashl<mode>3"
+  [(match_operand:VDQ 0 "register_operand" "")
+   (match_operand:VDQ 1 "register_operand" "")
+   (match_operand:SI  2 "general_operand" "")]
+ "TARGET_SIMD"
+{
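+  /* A constant shift amount in range is handled by the immediate form
+     (SHL); otherwise the amount is forced into a register, duplicated
+     across the lanes and used with the register form (SSHL), where a
+     positive per-lane amount shifts left.  */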
+  int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+  int shift_amount;
+
+  if (CONST_INT_P (operands[2]))
+    {
+      shift_amount = INTVAL (operands[2]);
+      if (shift_amount >= 0 && shift_amount < bit_width)
+        {
+         rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode,
+                                                      shift_amount);
+         emit_insn (gen_aarch64_simd_imm_shl<mode> (operands[0],
+                                                    operands[1],
+                                                    tmp));
+          DONE;
+        }
+      else
+        {
+          operands[2] = force_reg (SImode, operands[2]);
+        }
+    }
+  else if (MEM_P (operands[2]))
+    {
+      operands[2] = force_reg (SImode, operands[2]);
+    }
+
+  if (REG_P (operands[2]))
+    {
+      rtx tmp = gen_reg_rtx (<MODE>mode);
+      emit_insn (gen_aarch64_simd_dup<mode> (tmp,
+                                            convert_to_mode (<VEL>mode,
+                                                             operands[2],
+                                                             0)));
+      emit_insn (gen_aarch64_simd_reg_sshl<mode> (operands[0], operands[1],
+                                                 tmp));
+      DONE;
+    }
+  else
+    FAIL;
+}
+)
+
+(define_expand "lshr<mode>3"
+  [(match_operand:VDQ 0 "register_operand" "")
+   (match_operand:VDQ 1 "register_operand" "")
+   (match_operand:SI  2 "general_operand" "")]
+ "TARGET_SIMD"
+{
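+  /* A constant shift amount in range uses the immediate form (USHR);
+     a variable amount is negated, duplicated across the lanes and
+     used with USHL, which shifts right when the per-lane amount is
+     negative.  */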
+  int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+  int shift_amount;
+
+  if (CONST_INT_P (operands[2]))
+    {
+      shift_amount = INTVAL (operands[2]);
+      if (shift_amount > 0 && shift_amount <= bit_width)
+        {
+         rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode,
+                                                      shift_amount);
+          emit_insn (gen_aarch64_simd_lshr<mode> (operands[0],
+                                                 operands[1],
+                                                 tmp));
+         DONE;
+       }
+      else
+        operands[2] = force_reg (SImode, operands[2]);
+    }
+  else if (MEM_P (operands[2]))
+    {
+      operands[2] = force_reg (SImode, operands[2]);
+    }
+
+  if (REG_P (operands[2]))
+    {
+      rtx tmp = gen_reg_rtx (SImode);
+      rtx tmp1 = gen_reg_rtx (<MODE>mode);
+      emit_insn (gen_negsi2 (tmp, operands[2]));
+      emit_insn (gen_aarch64_simd_dup<mode> (tmp1,
+                                            convert_to_mode (<VEL>mode,
+                                                             tmp, 0)));
+      emit_insn (gen_aarch64_simd_reg_shl<mode>_unsigned (operands[0],
+                                                         operands[1],
+                                                         tmp1));
+      DONE;
+    }
+  else
+    FAIL;
+}
+)
+
+(define_expand "ashr<mode>3"
+  [(match_operand:VDQ 0 "register_operand" "")
+   (match_operand:VDQ 1 "register_operand" "")
+   (match_operand:SI  2 "general_operand" "")]
+ "TARGET_SIMD"
+{
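+  /* As for lshr<mode>3, but using SSHR for the immediate form and the
+     signed register shift for the variable form.  */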
+  int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+  int shift_amount;
+
+  if (CONST_INT_P (operands[2]))
+    {
+      shift_amount = INTVAL (operands[2]);
+      if (shift_amount > 0 && shift_amount <= bit_width)
+        {
+         rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode,
+                                                      shift_amount);
+          emit_insn (gen_aarch64_simd_ashr<mode> (operands[0],
+                                                 operands[1],
+                                                 tmp));
+          DONE;
+       }
+      else
+        operands[2] = force_reg (SImode, operands[2]);
+    }
+  else if (MEM_P (operands[2]))
+    {
+      operands[2] = force_reg (SImode, operands[2]);
+    }
+
+  if (REG_P (operands[2]))
+    {
+      rtx tmp = gen_reg_rtx (SImode);
+      rtx tmp1 = gen_reg_rtx (<MODE>mode);
+      emit_insn (gen_negsi2 (tmp, operands[2]));
+      emit_insn (gen_aarch64_simd_dup<mode> (tmp1,
+                                            convert_to_mode (<VEL>mode,
+                                                             tmp, 0)));
+      emit_insn (gen_aarch64_simd_reg_shl<mode>_signed (operands[0],
+                                                       operands[1],
+                                                       tmp1));
+      DONE;
+    }
+  else
+    FAIL;
+}
+)
+
+(define_expand "vashl<mode>3"
+ [(match_operand:VDQ 0 "register_operand" "")
+  (match_operand:VDQ 1 "register_operand" "")
+  (match_operand:VDQ 2 "register_operand" "")]
+ "TARGET_SIMD"
+{
+  emit_insn (gen_aarch64_simd_reg_sshl<mode> (operands[0], operands[1],
+                                             operands[2]));
+  DONE;
+})
+
+;; Using mode VQ_S as there is no V2DImode neg.
+;; Negating individual lanes would almost certainly outweigh the
+;; gain from vectorization.
+(define_expand "vashr<mode>3"
+ [(match_operand:VQ_S 0 "register_operand" "")
+  (match_operand:VQ_S 1 "register_operand" "")
+  (match_operand:VQ_S 2 "register_operand" "")]
+ "TARGET_SIMD"
+{
+  rtx neg = gen_reg_rtx (<MODE>mode);
+  emit (gen_neg<mode>2 (neg, operands[2]));
+  emit_insn (gen_aarch64_simd_reg_shl<mode>_signed (operands[0], operands[1],
+                                                   neg));
+  DONE;
+})
+
+(define_expand "vlshr<mode>3"
+ [(match_operand:VQ_S 0 "register_operand" "")
+  (match_operand:VQ_S 1 "register_operand" "")
+  (match_operand:VQ_S 2 "register_operand" "")]
+ "TARGET_SIMD"
+{
+  rtx neg = gen_reg_rtx (<MODE>mode);
+  emit (gen_neg<mode>2 (neg, operands[2]));
+  emit_insn (gen_aarch64_simd_reg_shl<mode>_unsigned (operands[0], operands[1],
+                                                     neg));
+  DONE;
+})
+
+(define_expand "vec_set<mode>"
+  [(match_operand:VQ_S 0 "register_operand" "+w")
+   (match_operand:<VEL> 1 "register_operand" "r")
+   (match_operand:SI 2 "immediate_operand" "")]
+  "TARGET_SIMD"
+  {
+    HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]);
+    emit_insn (gen_aarch64_simd_vec_set<mode> (operands[0], operands[1],
+                                           GEN_INT (elem), operands[0]));
+    DONE;
+  }
+)
+
+(define_insn "aarch64_simd_vec_setv2di"
+  [(set (match_operand:V2DI 0 "register_operand" "=w")
+        (vec_merge:V2DI
+           (vec_duplicate:V2DI
+               (match_operand:DI 1 "register_operand" "r"))
+           (match_operand:V2DI 3 "register_operand" "0")
+           (match_operand:SI 2 "immediate_operand" "i")))]
+  "TARGET_SIMD"
+  "ins\t%0.d[%p2], %1";
+  [(set_attr "simd_type" "simd_insgp")
+   (set_attr "simd_mode" "V2DI")]
+)
+
+(define_expand "vec_setv2di"
+  [(match_operand:V2DI 0 "register_operand" "+w")
+   (match_operand:DI 1 "register_operand" "r")
+   (match_operand:SI 2 "immediate_operand" "")]
+  "TARGET_SIMD"
+  {
+    HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]);
+    emit_insn (gen_aarch64_simd_vec_setv2di (operands[0], operands[1],
+                                         GEN_INT (elem), operands[0]));
+    DONE;
+  }
+)
+
+(define_insn "aarch64_simd_vec_set<mode>"
+  [(set (match_operand:VDQF 0 "register_operand" "=w")
+        (vec_merge:VDQF
+           (vec_duplicate:VDQF
+               (match_operand:<VEL> 1 "register_operand" "w"))
+           (match_operand:VDQF 3 "register_operand" "0")
+           (match_operand:SI 2 "immediate_operand" "i")))]
+  "TARGET_SIMD"
+  "ins\t%0.<Vetype>[%p2], %1.<Vetype>[0]";
+  [(set_attr "simd_type" "simd_ins")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "vec_set<mode>"
+  [(match_operand:VDQF 0 "register_operand" "+w")
+   (match_operand:<VEL> 1 "register_operand" "w")
+   (match_operand:SI 2 "immediate_operand" "")]
+  "TARGET_SIMD"
+  {
+    HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]);
+    emit_insn (gen_aarch64_simd_vec_set<mode> (operands[0], operands[1],
+                                         GEN_INT (elem), operands[0]));
+    DONE;
+  }
+)
+
+(define_insn "aarch64_mla<mode>"
+ [(set (match_operand:VQ_S 0 "register_operand" "=w")
+       (plus:VQ_S (mult:VQ_S (match_operand:VQ_S 2 "register_operand" "w")
+                            (match_operand:VQ_S 3 "register_operand" "w"))
+                 (match_operand:VQ_S 1 "register_operand" "0")))]
+ "TARGET_SIMD"
+ "mla\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>"
+  [(set_attr "simd_type" "simd_mla")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_mls<mode>"
+ [(set (match_operand:VQ_S 0 "register_operand" "=w")
+       (minus:VQ_S (match_operand:VQ_S 1 "register_operand" "0")
+                  (mult:VQ_S (match_operand:VQ_S 2 "register_operand" "w")
+                             (match_operand:VQ_S 3 "register_operand" "w"))))]
+ "TARGET_SIMD"
+ "mls\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>"
+  [(set_attr "simd_type" "simd_mla")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; Max/Min operations.
+(define_insn "<maxmin><mode>3"
+ [(set (match_operand:VQ_S 0 "register_operand" "=w")
+       (MAXMIN:VQ_S (match_operand:VQ_S 1 "register_operand" "w")
+                   (match_operand:VQ_S 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "<maxmin>\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_minmax")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; Move into the low half, clearing the high half to 0.
+
+(define_insn "move_lo_quad_<mode>"
+  [(set (match_operand:VQ 0 "register_operand" "=w")
+        (vec_concat:VQ
+         (match_operand:<VHALF> 1 "register_operand" "w")
+         (vec_duplicate:<VHALF> (const_int 0))))]
+  "TARGET_SIMD"
+  "mov\\t%d0, %d1";
+  [(set_attr "simd_type" "simd_dup")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; Move into high-half.
+
+(define_insn "aarch64_simd_move_hi_quad_<mode>"
+  [(set (match_operand:VQ 0 "register_operand" "+w")
+        (vec_concat:VQ
+          (vec_select:<VHALF>
+                (match_dup 0)
+                (match_operand:VQ 2 "vect_par_cnst_lo_half" ""))
+         (match_operand:<VHALF> 1 "register_operand" "w")))]
+  "TARGET_SIMD"
+  "ins\\t%0.d[1], %1.d[0]";
+  [(set_attr "simd_type" "simd_ins")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "move_hi_quad_<mode>"
+ [(match_operand:VQ 0 "register_operand" "")
+  (match_operand:<VHALF> 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
+  emit_insn (gen_aarch64_simd_move_hi_quad_<mode> (operands[0],
+                                                  operands[1], p));
+  DONE;
+})
+
+;; Narrowing operations.
+
+;; For doubles.
+(define_insn "aarch64_simd_vec_pack_trunc_<mode>"
+ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
+       (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "xtn\\t%0.<Vntype>, %1.<Vtype>"
+  [(set_attr "simd_type" "simd_shiftn_imm")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "vec_pack_trunc_<mode>"
+ [(match_operand:<VNARROWD> 0 "register_operand" "")
+  (match_operand:VDN 1 "register_operand" "")
+  (match_operand:VDN 2 "register_operand" "")]
+ "TARGET_SIMD"
+{
+  rtx tempreg = gen_reg_rtx (<VDBL>mode);
+
+  emit_insn (gen_move_lo_quad_<Vdbl> (tempreg, operands[1]));
+  emit_insn (gen_move_hi_quad_<Vdbl> (tempreg, operands[2]));
+  emit_insn (gen_aarch64_simd_vec_pack_trunc_<Vdbl> (operands[0], tempreg));
+  DONE;
+})
+
+;; For quads.
+
+(define_insn "vec_pack_trunc_<mode>"
+ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "+&w")
+       (vec_concat:<VNARROWQ2>
+        (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w"))
+        (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand" "w"))))]
+ "TARGET_SIMD"
+ "xtn\\t%0.<Vntype>, %1.<Vtype>\;xtn2\\t%0.<V2ntype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_shiftn2_imm")
+   (set_attr "simd_mode" "<MODE>")
+   (set_attr "length" "8")]
+)
+
+;; Widening operations.
+
+(define_insn "aarch64_simd_vec_unpack<su>_lo_<mode>"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+                              (match_operand:VQW 1 "register_operand" "w")
+                              (match_operand:VQW 2 "vect_par_cnst_lo_half" "")
+                           )))]
+  "TARGET_SIMD"
+  "<su>shll %0.<Vwtype>, %1.<Vhalftype>, 0"
+  [(set_attr "simd_type" "simd_shiftl_imm")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_simd_vec_unpack<su>_hi_<mode>"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+                              (match_operand:VQW 1 "register_operand" "w")
+                              (match_operand:VQW 2 "vect_par_cnst_hi_half" "")
+                           )))]
+  "TARGET_SIMD"
+  "<su>shll2 %0.<Vwtype>, %1.<Vtype>, 0"
+  [(set_attr "simd_type" "simd_shiftl_imm")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "vec_unpack<su>_hi_<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "")
+   (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand"))]
+  "TARGET_SIMD"
+  {
+    rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+    emit_insn (gen_aarch64_simd_vec_unpack<su>_hi_<mode> (operands[0],
+                                                         operands[1], p));
+    DONE;
+  }
+)
+
+(define_expand "vec_unpack<su>_lo_<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "")
+   (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" ""))]
+  "TARGET_SIMD"
+  {
+    rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
+    emit_insn (gen_aarch64_simd_vec_unpack<su>_lo_<mode> (operands[0],
+                                                         operands[1], p));
+    DONE;
+  }
+)
+
+;; Widening arithmetic.
+
+(define_insn "aarch64_simd_vec_<su>mult_lo_<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+       (mult:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+                          (match_operand:VQW 1 "register_operand" "w")
+                           (match_operand:VQW 3 "vect_par_cnst_lo_half" "")))
+                    (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+                           (match_operand:VQW 2 "register_operand" "w")
+                           (match_dup 3)))))]
+  "TARGET_SIMD"
+  "<su>mull %0.<Vwtype>, %1.<Vhalftype>, %2.<Vhalftype>"
+  [(set_attr "simd_type" "simd_mull")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "vec_widen_<su>mult_lo_<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "")
+   (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" ""))
+   (ANY_EXTEND:<VWIDE> (match_operand:VQW 2 "register_operand" ""))]
+ "TARGET_SIMD"
+ {
+   rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
+   emit_insn (gen_aarch64_simd_vec_<su>mult_lo_<mode> (operands[0],
+                                                      operands[1],
+                                                      operands[2], p));
+   DONE;
+ }
+)
+
+(define_insn "aarch64_simd_vec_<su>mult_hi_<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+      (mult:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+                           (match_operand:VQW 1 "register_operand" "w")
+                           (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))
+                   (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+                           (match_operand:VQW 2 "register_operand" "w")
+                           (match_dup 3)))))]
+  "TARGET_SIMD"
+  "<su>mull2 %0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_mull")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "vec_widen_<su>mult_hi_<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "")
+   (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" ""))
+   (ANY_EXTEND:<VWIDE> (match_operand:VQW 2 "register_operand" ""))]
+ "TARGET_SIMD"
+ {
+   rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+   emit_insn (gen_aarch64_simd_vec_<su>mult_hi_<mode> (operands[0],
+                                                      operands[1],
+                                                      operands[2], p));
+   DONE;
+ }
+)
+
+;; FP vector operations.
+;; AArch64 AdvSIMD supports single-precision (32-bit) and
+;; double-precision (64-bit) floating-point data types and arithmetic as
+;; defined by the IEEE 754-2008 standard.  This makes these operations
+;; vectorizable without the need for -ffast-math or
+;; -funsafe-math-optimizations.
+;;
+;; Floating-point operations can raise an exception.  Vectorizing such
+;; operations is safe for the reasons explained below.
+;;
+;; ARMv8 permits trapped floating-point exception handling, but only as
+;; an optional feature.  If a floating-point exception is raised by
+;; vectorised code, then:
+;; 1.  If trapped floating-point exceptions are available, then a trap
+;;     will be taken when any lane raises an enabled exception.  A trap
+;;     handler may determine which lane raised the exception.
+;; 2.  Otherwise, a sticky exception flag is set in the floating-point
+;;     status register (FPSR).  Software may test the exception flags
+;;     explicitly; such tests either occur within the region being
+;;     vectorised, in which case they prevent vectorisation and allow
+;;     precise identification of the failing operation, or they occur
+;;     outside any vectorisable region, in which case the specific
+;;     operation and lane are not of interest.
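+;;
+;; As an illustration only (not part of the patterns themselves),
+;; assuming a simple single-precision routine such as:
+;;
+;;   void
+;;   vadd (float *restrict a, float *restrict b, int n)
+;;   {
+;;     for (int i = 0; i < n; i++)
+;;       a[i] += b[i];
+;;   }
+;;
+;; the vectoriser may use the "add<mode>3" pattern below and emit
+;; "fadd v0.4s, v0.4s, v1.4s" without any -ffast-math options.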
+
+;; FP arithmetic operations.
+
+(define_insn "add<mode>3"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+       (plus:VDQF (match_operand:VDQF 1 "register_operand" "w")
+                 (match_operand:VDQF 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fadd\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_fadd")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "sub<mode>3"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+       (minus:VDQF (match_operand:VDQF 1 "register_operand" "w")
+                  (match_operand:VDQF 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fsub\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_fadd")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "mul<mode>3"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+       (mult:VDQF (match_operand:VDQF 1 "register_operand" "w")
+                 (match_operand:VDQF 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_fmul")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "div<mode>3"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+       (div:VDQF (match_operand:VDQF 1 "register_operand" "w")
+                (match_operand:VDQF 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fdiv\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_fdiv")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "neg<mode>2"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+       (neg:VDQF (match_operand:VDQF 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fneg\\t%0.<Vtype>, %1.<Vtype>"
+  [(set_attr "simd_type" "simd_fnegabs")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "abs<mode>2"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+       (abs:VDQF (match_operand:VDQF 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fabs\\t%0.<Vtype>, %1.<Vtype>"
+  [(set_attr "simd_type" "simd_fnegabs")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "fma<mode>4"
+  [(set (match_operand:VDQF 0 "register_operand" "=w")
+       (fma:VDQF (match_operand:VDQF 1 "register_operand" "w")
+                (match_operand:VDQF 2 "register_operand" "w")
+                (match_operand:VDQF 3 "register_operand" "0")))]
+  "TARGET_SIMD"
+ "fmla\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_fmla")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_vmls<mode>"
+  [(set (match_operand:VDQF 0 "register_operand" "=w")
+       (minus:VDQF (match_operand:VDQF 1 "register_operand" "0")
+                  (mult:VDQF (match_operand:VDQF 2 "register_operand" "w")
+                             (match_operand:VDQF 3 "register_operand" "w"))))]
+  "TARGET_SIMD"
+ "fmls\\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>"
+  [(set_attr "simd_type" "simd_fmla")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; FP Max/Min
+;; Max/Min operations are introduced by idiom recognition in GCC's
+;; mid-end.  An expression like:
+;;      a = (b < c) ? b : c;
+;; is idiom-matched as MIN_EXPR<b,c> only if -ffinite-math-only is enabled
+;; either explicitly or indirectly via -ffast-math.
+;;
+;; MIN_EXPR and MAX_EXPR eventually map to 'smin' and 'smax' in RTL.
+;; The 'smax' and 'smin' RTL standard pattern names do not specify which
+;; operand will be returned when both operands are zero (i.e. they may not
+;; honour signed zeroes), or when either operand is NaN.  Therefore GCC
+;; only introduces MIN_EXPR/MAX_EXPR in fast math mode or when not honouring
+;; NaNs.
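+;;
+;; As an illustration only (not part of the patterns themselves), a
+;; hypothetical loop such as:
+;;
+;;   void
+;;   vmin (float *restrict a, float *restrict b, int n)
+;;   {
+;;     for (int i = 0; i < n; i++)
+;;       a[i] = a[i] < b[i] ? a[i] : b[i];
+;;   }
+;;
+;; compiled with -ffast-math (which implies -ffinite-math-only) may be
+;; recognised as MIN_EXPR and vectorised via "smin<mode>3" below,
+;; i.e. mapped to FMINNM.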
+
+(define_insn "smax<mode>3"
+  [(set (match_operand:VDQF 0 "register_operand" "=w")
+        (smax:VDQF (match_operand:VDQF 1 "register_operand" "w")
+                  (match_operand:VDQF 2 "register_operand" "w")))]
+  "TARGET_SIMD"
+  "fmaxnm\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_fminmax")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "smin<mode>3"
+  [(set (match_operand:VDQF 0 "register_operand" "=w")
+        (smin:VDQF (match_operand:VDQF 1 "register_operand" "w")
+                  (match_operand:VDQF 2 "register_operand" "w")))]
+  "TARGET_SIMD"
+  "fminnm\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_fminmax")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; FP 'across lanes' max and min ops.
+
+(define_insn "reduc_s<fmaxminv>_v4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=w")
+       (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "w")]
+                   FMAXMINV))]
+ "TARGET_SIMD"
+ "f<fmaxminv>nmv\\t%s0, %1.4s";
+  [(set_attr "simd_type" "simd_fminmaxv")
+   (set_attr "simd_mode" "V4SF")]
+)
+
+(define_insn "reduc_s<fmaxminv>_<mode>"
+ [(set (match_operand:V2F 0 "register_operand" "=w")
+       (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
+                   FMAXMINV))]
+ "TARGET_SIMD"
+ "f<fmaxminv>nmp\\t%0.<Vtype>, %1.<Vtype>, %1.<Vtype>";
+  [(set_attr "simd_type" "simd_fminmax")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; FP 'across lanes' add.
+
+(define_insn "aarch64_addvv4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=w")
+       (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "w")]
+                   UNSPEC_FADDV))]
+ "TARGET_SIMD"
+ "faddp\\t%0.4s, %1.4s, %1.4s"
+  [(set_attr "simd_type" "simd_fadd")
+   (set_attr "simd_mode" "V4SF")]
+)
+
+(define_expand "reduc_uplus_v4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=w")
+       (match_operand:V4SF 1 "register_operand" "w"))]
+ "TARGET_SIMD"
+{
+  rtx tmp = gen_reg_rtx (V4SFmode);
+  emit_insn (gen_aarch64_addvv4sf (tmp, operands[1]));
+  emit_insn (gen_aarch64_addvv4sf (operands[0], tmp));
+  DONE;
+})
+
+(define_expand "reduc_splus_v4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=w")
+       (match_operand:V4SF 1 "register_operand" "w"))]
+ "TARGET_SIMD"
+{
+  rtx tmp = gen_reg_rtx (V4SFmode);
+  emit_insn (gen_aarch64_addvv4sf (tmp, operands[1]));
+  emit_insn (gen_aarch64_addvv4sf (operands[0], tmp));
+  DONE;
+})
+
+(define_insn "aarch64_addv<mode>"
+ [(set (match_operand:V2F 0 "register_operand" "=w")
+       (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
+                   UNSPEC_FADDV))]
+ "TARGET_SIMD"
+ "faddp\\t%<Vetype>0, %1.<Vtype>"
+  [(set_attr "simd_type" "simd_fadd")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "reduc_uplus_<mode>"
+ [(set (match_operand:V2F 0 "register_operand" "=w")
+       (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
+                   UNSPEC_FADDV))]
+ "TARGET_SIMD"
+ ""
+)
+
+(define_expand "reduc_splus_<mode>"
+ [(set (match_operand:V2F 0 "register_operand" "=w")
+       (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
+                   UNSPEC_FADDV))]
+ "TARGET_SIMD"
+ ""
+)
+
+;; Reduction across lanes.
+
+(define_insn "aarch64_addv<mode>"
+ [(set (match_operand:VDQV 0 "register_operand" "=w")
+       (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
+                   UNSPEC_ADDV))]
+ "TARGET_SIMD"
+ "addv\\t%<Vetype>0, %1.<Vtype>"
+  [(set_attr "simd_type" "simd_addv")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "reduc_splus_<mode>"
+ [(set (match_operand:VDQV 0 "register_operand" "=w")
+       (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
+                   UNSPEC_ADDV))]
+ "TARGET_SIMD"
+ ""
+)
+
+(define_expand "reduc_uplus_<mode>"
+ [(set (match_operand:VDQV 0 "register_operand" "=w")
+       (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
+                   UNSPEC_ADDV))]
+ "TARGET_SIMD"
+ ""
+)
+
+(define_insn "aarch64_addvv2di"
+ [(set (match_operand:V2DI 0 "register_operand" "=w")
+       (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "w")]
+                   UNSPEC_ADDV))]
+ "TARGET_SIMD"
+ "addp\\t%d0, %1.2d"
+  [(set_attr "simd_type" "simd_add")
+   (set_attr "simd_mode" "V2DI")]
+)
+
+(define_expand "reduc_uplus_v2di"
+ [(set (match_operand:V2DI 0 "register_operand" "=w")
+       (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "w")]
+                   UNSPEC_ADDV))]
+ "TARGET_SIMD"
+ ""
+)
+
+(define_expand "reduc_splus_v2di"
+ [(set (match_operand:V2DI 0 "register_operand" "=w")
+       (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "w")]
+                   UNSPEC_ADDV))]
+ "TARGET_SIMD"
+ ""
+)
+
+(define_insn "aarch64_addvv2si"
+ [(set (match_operand:V2SI 0 "register_operand" "=w")
+       (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
+                   UNSPEC_ADDV))]
+ "TARGET_SIMD"
+ "addp\\t%0.2s, %1.2s, %1.2s"
+  [(set_attr "simd_type" "simd_add")
+   (set_attr "simd_mode" "V2SI")]
+)
+
+(define_expand "reduc_uplus_v2si"
+ [(set (match_operand:V2SI 0 "register_operand" "=w")
+       (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
+                   UNSPEC_ADDV))]
+ "TARGET_SIMD"
+ ""
+)
+
+(define_expand "reduc_splus_v2si"
+ [(set (match_operand:V2SI 0 "register_operand" "=w")
+       (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
+                   UNSPEC_ADDV))]
+ "TARGET_SIMD"
+ ""
+)
+
+(define_insn "reduc_<maxminv>_<mode>"
+ [(set (match_operand:VDQV 0 "register_operand" "=w")
+       (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
+                   MAXMINV))]
+ "TARGET_SIMD"
+ "<maxminv>v\\t%<Vetype>0, %1.<Vtype>"
+  [(set_attr "simd_type" "simd_minmaxv")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "reduc_<maxminv>_v2si"
+ [(set (match_operand:V2SI 0 "register_operand" "=w")
+       (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
+                   MAXMINV))]
+ "TARGET_SIMD"
+ "<maxminv>p\\t%0.2s, %1.2s, %1.2s"
+  [(set_attr "simd_type" "simd_minmax")
+   (set_attr "simd_mode" "V2SI")]
+)
+
+;; Patterns for AArch64 SIMD Intrinsics.
+
+(define_expand "aarch64_create<mode>"
+  [(match_operand:VD_RE 0 "register_operand" "")
+   (match_operand:DI 1 "general_operand" "")]
+  "TARGET_SIMD"
+{
+  rtx src = gen_lowpart (<MODE>mode, operands[1]);
+  emit_move_insn (operands[0], src);
+  DONE;
+})
+
+(define_insn "aarch64_get_lane_signed<mode>"
+  [(set (match_operand:<VEL> 0 "register_operand" "=r")
+       (sign_extend:<VEL>
+         (vec_select:<VEL>
+           (match_operand:VQ_S 1 "register_operand" "w")
+           (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
+  "TARGET_SIMD"
+  "smov\\t%0, %1.<Vetype>[%2]"
+  [(set_attr "simd_type" "simd_movgp")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_get_lane_unsigned<mode>"
+  [(set (match_operand:<VEL> 0 "register_operand" "=r")
+       (zero_extend:<VEL>
+         (vec_select:<VEL>
+           (match_operand:VDQ 1 "register_operand" "w")
+           (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
+  "TARGET_SIMD"
+  "umov\\t%<vw>0, %1.<Vetype>[%2]"
+  [(set_attr "simd_type" "simd_movgp")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_get_lane<mode>"
+  [(set (match_operand:<VEL> 0 "register_operand" "=w")
+       (vec_select:<VEL>
+           (match_operand:VDQF 1 "register_operand" "w")
+           (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
+  "TARGET_SIMD"
+  "mov\\t%0.<Vetype>[0], %1.<Vetype>[%2]"
+  [(set_attr "simd_type" "simd_ins")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_get_lanedi"
+  [(match_operand:DI 0 "register_operand" "=r")
+   (match_operand:DI 1 "register_operand" "w")
+   (match_operand:SI 2 "immediate_operand" "i")]
+  "TARGET_SIMD"
+{
+  aarch64_simd_lane_bounds (operands[2], 0, 1);
+  emit_move_insn (operands[0], operands[1]);
+  DONE;
+})
+
+(define_expand "aarch64_reinterpretv8qi<mode>"
+  [(match_operand:V8QI 0 "register_operand" "")
+   (match_operand:VDC 1 "register_operand" "")]
+  "TARGET_SIMD"
+{
+  aarch64_simd_reinterpret (operands[0], operands[1]);
+  DONE;
+})
+
+(define_expand "aarch64_reinterpretv4hi<mode>"
+  [(match_operand:V4HI 0 "register_operand" "")
+   (match_operand:VDC 1 "register_operand" "")]
+  "TARGET_SIMD"
+{
+  aarch64_simd_reinterpret (operands[0], operands[1]);
+  DONE;
+})
+
+(define_expand "aarch64_reinterpretv2si<mode>"
+  [(match_operand:V2SI 0 "register_operand" "")
+   (match_operand:VDC 1 "register_operand" "")]
+  "TARGET_SIMD"
+{
+  aarch64_simd_reinterpret (operands[0], operands[1]);
+  DONE;
+})
+
+(define_expand "aarch64_reinterpretv2sf<mode>"
+  [(match_operand:V2SF 0 "register_operand" "")
+   (match_operand:VDC 1 "register_operand" "")]
+  "TARGET_SIMD"
+{
+  aarch64_simd_reinterpret (operands[0], operands[1]);
+  DONE;
+})
+
+(define_expand "aarch64_reinterpretdi<mode>"
+  [(match_operand:DI 0 "register_operand" "")
+   (match_operand:VD_RE 1 "register_operand" "")]
+  "TARGET_SIMD"
+{
+  aarch64_simd_reinterpret (operands[0], operands[1]);
+  DONE;
+})
+
+(define_expand "aarch64_reinterpretv16qi<mode>"
+  [(match_operand:V16QI 0 "register_operand" "")
+   (match_operand:VQ 1 "register_operand" "")]
+  "TARGET_SIMD"
+{
+  aarch64_simd_reinterpret (operands[0], operands[1]);
+  DONE;
+})
+
+(define_expand "aarch64_reinterpretv8hi<mode>"
+  [(match_operand:V8HI 0 "register_operand" "")
+   (match_operand:VQ 1 "register_operand" "")]
+  "TARGET_SIMD"
+{
+  aarch64_simd_reinterpret (operands[0], operands[1]);
+  DONE;
+})
+
+(define_expand "aarch64_reinterpretv4si<mode>"
+  [(match_operand:V4SI 0 "register_operand" "")
+   (match_operand:VQ 1 "register_operand" "")]
+  "TARGET_SIMD"
+{
+  aarch64_simd_reinterpret (operands[0], operands[1]);
+  DONE;
+})
+
+(define_expand "aarch64_reinterpretv4sf<mode>"
+  [(match_operand:V4SF 0 "register_operand" "")
+   (match_operand:VQ 1 "register_operand" "")]
+  "TARGET_SIMD"
+{
+  aarch64_simd_reinterpret (operands[0], operands[1]);
+  DONE;
+})
+
+(define_expand "aarch64_reinterpretv2di<mode>"
+  [(match_operand:V2DI 0 "register_operand" "")
+   (match_operand:VQ 1 "register_operand" "")]
+  "TARGET_SIMD"
+{
+  aarch64_simd_reinterpret (operands[0], operands[1]);
+  DONE;
+})
+
+(define_expand "aarch64_reinterpretv2df<mode>"
+  [(match_operand:V2DF 0 "register_operand" "")
+   (match_operand:VQ 1 "register_operand" "")]
+  "TARGET_SIMD"
+{
+  aarch64_simd_reinterpret (operands[0], operands[1]);
+  DONE;
+})
+
+;; In this insn, operand 1 is the low part and operand 2 the high part
+;; of the destination vector.
+
+(define_insn "*aarch64_combinez<mode>"
+  [(set (match_operand:<VDBL> 0 "register_operand" "=&w")
+        (vec_concat:<VDBL>
+          (match_operand:VDIC 1 "register_operand" "w")
+          (match_operand:VDIC 2 "aarch64_simd_imm_zero" "Dz")))]
+  "TARGET_SIMD"
+  "mov\\t%0.8b, %1.8b"
+  [(set_attr "simd_type" "simd_move")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_combine<mode>"
+  [(set (match_operand:<VDBL> 0 "register_operand" "=&w")
+        (vec_concat:<VDBL> (match_operand:VDC 1 "register_operand" "w")
+                          (match_operand:VDC 2 "register_operand" "w")))]
+  "TARGET_SIMD"
+  "mov\\t%0.d[0], %1.d[0]\;ins\\t%0.d[1], %2.d[0]"
+  [(set_attr "simd_type" "simd_ins")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; <su><addsub>l<q>.
+
+(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>l2<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+       (ADDSUB:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+                          (match_operand:VQW 1 "register_operand" "w")
+                          (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))
+                      (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+                          (match_operand:VQW 2 "register_operand" "w")
+                          (match_dup 3)))))]
+  "TARGET_SIMD"
+  "<ANY_EXTEND:su><ADDSUB:optab>l2 %0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_addl")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_saddl2<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:VQW 1 "register_operand" "w")
+   (match_operand:VQW 2 "register_operand" "w")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+  emit_insn (gen_aarch64_saddl2<mode>_internal (operands[0], operands[1],
+                                               operands[2], p));
+  DONE;
+})
+
+(define_expand "aarch64_uaddl2<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:VQW 1 "register_operand" "w")
+   (match_operand:VQW 2 "register_operand" "w")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+  emit_insn (gen_aarch64_uaddl2<mode>_internal (operands[0], operands[1],
+                                               operands[2], p));
+  DONE;
+})
+
+(define_expand "aarch64_ssubl2<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:VQW 1 "register_operand" "w")
+   (match_operand:VQW 2 "register_operand" "w")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+  emit_insn (gen_aarch64_ssubl2<mode>_internal (operands[0], operands[1],
+                                               operands[2], p));
+  DONE;
+})
+
+(define_expand "aarch64_usubl2<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:VQW 1 "register_operand" "w")
+   (match_operand:VQW 2 "register_operand" "w")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+  emit_insn (gen_aarch64_usubl2<mode>_internal (operands[0], operands[1],
+                                               operands[2], p));
+  DONE;
+})
+
+(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>l<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+       (ADDSUB:<VWIDE> (ANY_EXTEND:<VWIDE>
+                          (match_operand:VDW 1 "register_operand" "w"))
+                      (ANY_EXTEND:<VWIDE>
+                          (match_operand:VDW 2 "register_operand" "w"))))]
+  "TARGET_SIMD"
+  "<ANY_EXTEND:su><ADDSUB:optab>l %0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_addl")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; <su><addsub>w<q>.
+
+(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>w<mode>"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (ADDSUB:<VWIDE> (match_operand:<VWIDE> 1 "register_operand" "w")
+                       (ANY_EXTEND:<VWIDE>
+                         (match_operand:VDW 2 "register_operand" "w"))))]
+  "TARGET_SIMD"
+  "<ANY_EXTEND:su><ADDSUB:optab>w\\t%0.<Vwtype>, %1.<Vwtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_addl")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>w2<mode>_internal"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (ADDSUB:<VWIDE> (match_operand:<VWIDE> 1 "register_operand" "w")
+                       (ANY_EXTEND:<VWIDE>
+                         (vec_select:<VHALF>
+                          (match_operand:VQW 2 "register_operand" "w")
+                          (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))))]
+  "TARGET_SIMD"
+  "<ANY_EXTEND:su><ADDSUB:optab>w2\\t%0.<Vwtype>, %1.<Vwtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_addl")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_saddw2<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:<VWIDE> 1 "register_operand" "w")
+   (match_operand:VQW 2 "register_operand" "w")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+  emit_insn (gen_aarch64_saddw2<mode>_internal (operands[0], operands[1],
+                                               operands[2], p));
+  DONE;
+})
+
+(define_expand "aarch64_uaddw2<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:<VWIDE> 1 "register_operand" "w")
+   (match_operand:VQW 2 "register_operand" "w")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+  emit_insn (gen_aarch64_uaddw2<mode>_internal (operands[0], operands[1],
+                                               operands[2], p));
+  DONE;
+})
+
+(define_expand "aarch64_ssubw2<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:<VWIDE> 1 "register_operand" "w")
+   (match_operand:VQW 2 "register_operand" "w")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+  emit_insn (gen_aarch64_ssubw2<mode>_internal (operands[0], operands[1],
+                                               operands[2], p));
+  DONE;
+})
+
+(define_expand "aarch64_usubw2<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:<VWIDE> 1 "register_operand" "w")
+   (match_operand:VQW 2 "register_operand" "w")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+  emit_insn (gen_aarch64_usubw2<mode>_internal (operands[0], operands[1],
+                                               operands[2], p));
+  DONE;
+})
+
+;; <su><r>h<addsub>.
+
+(define_insn "aarch64_<sur>h<addsub><mode>"
+  [(set (match_operand:VQ_S 0 "register_operand" "=w")
+        (unspec:VQ_S [(match_operand:VQ_S 1 "register_operand" "w")
+                     (match_operand:VQ_S 2 "register_operand" "w")]
+                    HADDSUB))]
+  "TARGET_SIMD"
+  "<sur>h<addsub>\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_add")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; <r><addsub>hn<q>.
+
+(define_insn "aarch64_<sur><addsub>hn<mode>"
+  [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
+        (unspec:<VNARROWQ> [(match_operand:VQN 1 "register_operand" "w")
+                           (match_operand:VQN 2 "register_operand" "w")]
+                           ADDSUBHN))]
+  "TARGET_SIMD"
+  "<sur><addsub>hn\\t%0.<Vntype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_addn")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_<sur><addsub>hn2<mode>"
+  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+        (unspec:<VNARROWQ2> [(match_operand:<VNARROWQ> 1 "register_operand" "0")
+                            (match_operand:VQN 2 "register_operand" "w")
+                            (match_operand:VQN 3 "register_operand" "w")]
+                            ADDSUBHN2))]
+  "TARGET_SIMD"
+  "<sur><addsub>hn2\\t%0.<V2ntype>, %2.<Vtype>, %3.<Vtype>"
+  [(set_attr "simd_type" "simd_addn2")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; pmul.
+
+(define_insn "aarch64_pmul<mode>"
+  [(set (match_operand:VB 0 "register_operand" "=w")
+        (unspec:VB [(match_operand:VB 1 "register_operand" "w")
+                   (match_operand:VB 2 "register_operand" "w")]
+                  UNSPEC_PMUL))]
+ "TARGET_SIMD"
+ "pmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_mul")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; <su>q<addsub>
+
+(define_insn "aarch64_<su_optab><optab><mode>"
+  [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
+       (BINQOPS:VSDQ_I (match_operand:VSDQ_I 1 "register_operand" "w")
+                         (match_operand:VSDQ_I 2 "register_operand" "w")))]
+  "TARGET_SIMD"
+  "<su_optab><optab>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+  [(set_attr "simd_type" "simd_add")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; suqadd and usqadd
+
+(define_insn "aarch64_<sur>qadd<mode>"
+  [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
+       (unspec:VSDQ_I [(match_operand:VSDQ_I 1 "register_operand" "0")
+                       (match_operand:VSDQ_I 2 "register_operand" "w")]
+                      USSUQADD))]
+  "TARGET_SIMD"
+  "<sur>qadd\\t%<v>0<Vmtype>, %<v>2<Vmtype>"
+  [(set_attr "simd_type" "simd_sat_add")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; sqmovun
+
+(define_insn "aarch64_sqmovun<mode>"
+  [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
+       (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")]
+                            UNSPEC_SQXTUN))]
+   "TARGET_SIMD"
+   "sqxtun\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>"
+   [(set_attr "simd_type" "simd_sat_shiftn_imm")
+    (set_attr "simd_mode" "<MODE>")]
+ )
+
+;; sqmovn and uqmovn
+
+(define_insn "aarch64_<sur>qmovn<mode>"
+  [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
+       (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")]
+                            SUQMOVN))]
+  "TARGET_SIMD"
+  "<sur>qxtn\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>"
+   [(set_attr "simd_type" "simd_sat_shiftn_imm")
+    (set_attr "simd_mode" "<MODE>")]
+ )
+
+;; <su>q<absneg>
+
+(define_insn "aarch64_s<optab><mode>"
+  [(set (match_operand:VSDQ_I_BHSI 0 "register_operand" "=w")
+       (UNQOPS:VSDQ_I_BHSI
+         (match_operand:VSDQ_I_BHSI 1 "register_operand" "w")))]
+  "TARGET_SIMD"
+  "s<optab>\\t%<v>0<Vmtype>, %<v>1<Vmtype>"
+  [(set_attr "simd_type" "simd_sat_negabs")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; sq<r>dmulh.
+
+(define_insn "aarch64_sq<r>dmulh<mode>"
+  [(set (match_operand:VSDQ_HSI 0 "register_operand" "=w")
+       (unspec:VSDQ_HSI
+         [(match_operand:VSDQ_HSI 1 "register_operand" "w")
+          (match_operand:VSDQ_HSI 2 "register_operand" "w")]
+        VQDMULH))]
+  "TARGET_SIMD"
+  "sq<r>dmulh\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+  [(set_attr "simd_type" "simd_sat_mul")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; sq<r>dmulh_lane
+
+(define_insn "aarch64_sq<r>dmulh_lane<mode>"
+  [(set (match_operand:VSDQ_HSI 0 "register_operand" "=w")
+        (unspec:VSDQ_HSI
+         [(match_operand:VSDQ_HSI 1 "register_operand" "w")
+           (vec_select:<VEL>
+             (match_operand:<VCON> 2 "register_operand" "<vwx>")
+             (parallel [(match_operand:SI 3 "immediate_operand" "i")]))]
+        VQDMULH))]
+  "TARGET_SIMD"
+  "*
+   aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCON>mode));
+   return \"sq<r>dmulh\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]\";"
+  [(set_attr "simd_type" "simd_sat_mul")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; vqdml[sa]l
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l<mode>"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (SBINQOPS:<VWIDE>
+         (match_operand:<VWIDE> 1 "register_operand" "0")
+         (ss_ashift:<VWIDE>
+             (mult:<VWIDE>
+               (sign_extend:<VWIDE>
+                     (match_operand:VSD_HSI 2 "register_operand" "w"))
+               (sign_extend:<VWIDE>
+                     (match_operand:VSD_HSI 3 "register_operand" "w")))
+             (const_int 1))))]
+  "TARGET_SIMD"
+  "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %<v>3<Vmtype>"
+  [(set_attr "simd_type" "simd_sat_mlal")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; vqdml[sa]l_lane
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l_lane<mode>_internal"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (SBINQOPS:<VWIDE>
+         (match_operand:<VWIDE> 1 "register_operand" "0")
+         (ss_ashift:<VWIDE>
+           (mult:<VWIDE>
+             (sign_extend:<VWIDE>
+               (match_operand:VD_HSI 2 "register_operand" "w"))
+             (sign_extend:<VWIDE>
+               (vec_duplicate:VD_HSI
+                 (vec_select:<VEL>
+                   (match_operand:<VCON> 3 "register_operand" "<vwx>")
+                   (parallel [(match_operand:SI 4 "immediate_operand" "i")])))
+              ))
+           (const_int 1))))]
+  "TARGET_SIMD"
+  "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]"
+  [(set_attr "simd_type" "simd_sat_mlal")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l_lane<mode>_internal"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (SBINQOPS:<VWIDE>
+         (match_operand:<VWIDE> 1 "register_operand" "0")
+         (ss_ashift:<VWIDE>
+           (mult:<VWIDE>
+             (sign_extend:<VWIDE>
+               (match_operand:SD_HSI 2 "register_operand" "w"))
+             (sign_extend:<VWIDE>
+               (vec_select:<VEL>
+                 (match_operand:<VCON> 3 "register_operand" "<vwx>")
+                 (parallel [(match_operand:SI 4 "immediate_operand" "i")])))
+              )
+           (const_int 1))))]
+  "TARGET_SIMD"
+  "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]"
+  [(set_attr "simd_type" "simd_sat_mlal")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_sqdmlal_lane<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:<VWIDE> 1 "register_operand" "0")
+   (match_operand:VSD_HSI 2 "register_operand" "w")
+   (match_operand:<VCON> 3 "register_operand" "<vwx>")
+   (match_operand:SI 4 "immediate_operand" "i")]
+  "TARGET_SIMD"
+{
+  aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode) / 2);
+  emit_insn (gen_aarch64_sqdmlal_lane<mode>_internal (operands[0], operands[1],
+                                                     operands[2], operands[3],
+                                                     operands[4]));
+  DONE;
+})
+
+(define_expand "aarch64_sqdmlal_laneq<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:<VWIDE> 1 "register_operand" "0")
+   (match_operand:VSD_HSI 2 "register_operand" "w")
+   (match_operand:<VCON> 3 "register_operand" "<vwx>")
+   (match_operand:SI 4 "immediate_operand" "i")]
+  "TARGET_SIMD"
+{
+  aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode));
+  emit_insn (gen_aarch64_sqdmlal_lane<mode>_internal (operands[0], operands[1],
+                                                     operands[2], operands[3],
+                                                     operands[4]));
+  DONE;
+})
+
+(define_expand "aarch64_sqdmlsl_lane<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:<VWIDE> 1 "register_operand" "0")
+   (match_operand:VSD_HSI 2 "register_operand" "w")
+   (match_operand:<VCON> 3 "register_operand" "<vwx>")
+   (match_operand:SI 4 "immediate_operand" "i")]
+  "TARGET_SIMD"
+{
+  aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode) / 2);
+  emit_insn (gen_aarch64_sqdmlsl_lane<mode>_internal (operands[0], operands[1],
+                                                     operands[2], operands[3],
+                                                     operands[4]));
+  DONE;
+})
+
+(define_expand "aarch64_sqdmlsl_laneq<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:<VWIDE> 1 "register_operand" "0")
+   (match_operand:VSD_HSI 2 "register_operand" "w")
+   (match_operand:<VCON> 3 "register_operand" "<vwx>")
+   (match_operand:SI 4 "immediate_operand" "i")]
+  "TARGET_SIMD"
+{
+  aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode));
+  emit_insn (gen_aarch64_sqdmlsl_lane<mode>_internal (operands[0], operands[1],
+                                                     operands[2], operands[3],
+                                                     operands[4]));
+  DONE;
+})
+
+;; vqdml[sa]l_n
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l_n<mode>"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (SBINQOPS:<VWIDE>
+         (match_operand:<VWIDE> 1 "register_operand" "0")
+         (ss_ashift:<VWIDE>
+             (mult:<VWIDE>
+               (sign_extend:<VWIDE>
+                     (match_operand:VD_HSI 2 "register_operand" "w"))
+               (sign_extend:<VWIDE>
+                 (vec_duplicate:VD_HSI
+                   (match_operand:<VEL> 3 "register_operand" "w"))))
+             (const_int 1))))]
+  "TARGET_SIMD"
+  "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[0]"
+  [(set_attr "simd_type" "simd_sat_mlal")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; sqdml[as]l2
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l2<mode>_internal"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (SBINQOPS:<VWIDE>
+         (match_operand:<VWIDE> 1 "register_operand" "0")
+         (ss_ashift:<VWIDE>
+             (mult:<VWIDE>
+               (sign_extend:<VWIDE>
+                 (vec_select:<VHALF>
+                     (match_operand:VQ_HSI 2 "register_operand" "w")
+                     (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" "")))
+               (sign_extend:<VWIDE>
+                 (vec_select:<VHALF>
+                     (match_operand:VQ_HSI 3 "register_operand" "w")
+                     (match_dup 4))))
+             (const_int 1))))]
+  "TARGET_SIMD"
+  "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %<v>3<Vmtype>"
+  [(set_attr "simd_type" "simd_sat_mlal")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_sqdmlal2<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:<VWIDE> 1 "register_operand" "w")
+   (match_operand:VQ_HSI 2 "register_operand" "w")
+   (match_operand:VQ_HSI 3 "register_operand" "w")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+  emit_insn (gen_aarch64_sqdmlal2<mode>_internal (operands[0], operands[1],
+                                                 operands[2], operands[3], p));
+  DONE;
+})
+
+(define_expand "aarch64_sqdmlsl2<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:<VWIDE> 1 "register_operand" "w")
+   (match_operand:VQ_HSI 2 "register_operand" "w")
+   (match_operand:VQ_HSI 3 "register_operand" "w")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+  emit_insn (gen_aarch64_sqdmlsl2<mode>_internal (operands[0], operands[1],
+                                                 operands[2], operands[3], p));
+  DONE;
+})
+
+;; vqdml[sa]l2_lane
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l2_lane<mode>_internal"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (SBINQOPS:<VWIDE>
+         (match_operand:<VWIDE> 1 "register_operand" "0")
+         (ss_ashift:<VWIDE>
+             (mult:<VWIDE>
+               (sign_extend:<VWIDE>
+                  (vec_select:<VHALF>
+                    (match_operand:VQ_HSI 2 "register_operand" "w")
+                    (match_operand:VQ_HSI 5 "vect_par_cnst_hi_half" "")))
+               (sign_extend:<VWIDE>
+                  (vec_duplicate:<VHALF>
+                   (vec_select:<VEL>
+                     (match_operand:<VCON> 3 "register_operand" "<vwx>")
+                     (parallel [(match_operand:SI 4 "immediate_operand" "i")])
+                   ))))
+             (const_int 1))))]
+  "TARGET_SIMD"
+  "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]"
+  [(set_attr "simd_type" "simd_sat_mlal")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_sqdmlal2_lane<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:<VWIDE> 1 "register_operand" "w")
+   (match_operand:VQ_HSI 2 "register_operand" "w")
+   (match_operand:<VCON> 3 "register_operand" "<vwx>")
+   (match_operand:SI 4 "immediate_operand" "i")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+  aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode) / 2);
+  emit_insn (gen_aarch64_sqdmlal2_lane<mode>_internal (operands[0], operands[1],
+                                                      operands[2], operands[3],
+                                                      operands[4], p));
+  DONE;
+})
+
+(define_expand "aarch64_sqdmlal2_laneq<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:<VWIDE> 1 "register_operand" "w")
+   (match_operand:VQ_HSI 2 "register_operand" "w")
+   (match_operand:<VCON> 3 "register_operand" "<vwx>")
+   (match_operand:SI 4 "immediate_operand" "i")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+  aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode));
+  emit_insn (gen_aarch64_sqdmlal2_lane<mode>_internal (operands[0], operands[1],
+                                                      operands[2], operands[3],
+                                                      operands[4], p));
+  DONE;
+})
+
+(define_expand "aarch64_sqdmlsl2_lane<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:<VWIDE> 1 "register_operand" "w")
+   (match_operand:VQ_HSI 2 "register_operand" "w")
+   (match_operand:<VCON> 3 "register_operand" "<vwx>")
+   (match_operand:SI 4 "immediate_operand" "i")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+  aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode) / 2);
+  emit_insn (gen_aarch64_sqdmlsl2_lane<mode>_internal (operands[0], operands[1],
+                                                      operands[2], operands[3],
+                                                      operands[4], p));
+  DONE;
+})
+
+(define_expand "aarch64_sqdmlsl2_laneq<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:<VWIDE> 1 "register_operand" "w")
+   (match_operand:VQ_HSI 2 "register_operand" "w")
+   (match_operand:<VCON> 3 "register_operand" "<vwx>")
+   (match_operand:SI 4 "immediate_operand" "i")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+  aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode));
+  emit_insn (gen_aarch64_sqdmlsl2_lane<mode>_internal (operands[0], operands[1],
+                                                      operands[2], operands[3],
+                                                      operands[4], p));
+  DONE;
+})
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l2_n<mode>_internal"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (SBINQOPS:<VWIDE>
+         (match_operand:<VWIDE> 1 "register_operand" "0")
+         (ss_ashift:<VWIDE>
+           (mult:<VWIDE>
+             (sign_extend:<VWIDE>
+                (vec_select:<VHALF>
+                  (match_operand:VQ_HSI 2 "register_operand" "w")
+                  (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" "")))
+             (sign_extend:<VWIDE>
+                (vec_duplicate:<VHALF>
+                 (match_operand:<VEL> 3 "register_operand" "w"))))
+           (const_int 1))))]
+  "TARGET_SIMD"
+  "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[0]"
+  [(set_attr "simd_type" "simd_sat_mlal")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_sqdmlal2_n<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:<VWIDE> 1 "register_operand" "w")
+   (match_operand:VQ_HSI 2 "register_operand" "w")
+   (match_operand:<VEL> 3 "register_operand" "w")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+  emit_insn (gen_aarch64_sqdmlal2_n<mode>_internal (operands[0], operands[1],
+                                                   operands[2], operands[3],
+                                                   p));
+  DONE;
+})
+
+(define_expand "aarch64_sqdmlsl2_n<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:<VWIDE> 1 "register_operand" "w")
+   (match_operand:VQ_HSI 2 "register_operand" "w")
+   (match_operand:<VEL> 3 "register_operand" "w")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+  emit_insn (gen_aarch64_sqdmlsl2_n<mode>_internal (operands[0], operands[1],
+                                                   operands[2], operands[3],
+                                                   p));
+  DONE;
+})
+
+;; vqdmull
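+;; The doubling in these patterns is modelled as a saturating left shift
+;; by one (ss_ashift ... (const_int 1)) of the widened product, so each
+;; result element is sat (2 * a * b) at twice the source element width.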
+
+(define_insn "aarch64_sqdmull<mode>"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (ss_ashift:<VWIDE>
+            (mult:<VWIDE>
+              (sign_extend:<VWIDE>
+                    (match_operand:VSD_HSI 1 "register_operand" "w"))
+              (sign_extend:<VWIDE>
+                    (match_operand:VSD_HSI 2 "register_operand" "w")))
+            (const_int 1)))]
+  "TARGET_SIMD"
+  "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+  [(set_attr "simd_type" "simd_sat_mul")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; vqdmull_lane
+
+(define_insn "aarch64_sqdmull_lane<mode>_internal"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (ss_ashift:<VWIDE>
+            (mult:<VWIDE>
+              (sign_extend:<VWIDE>
+                (match_operand:VD_HSI 1 "register_operand" "w"))
+              (sign_extend:<VWIDE>
+                 (vec_duplicate:VD_HSI
+                   (vec_select:<VEL>
+                    (match_operand:<VCON> 2 "register_operand" "<vwx>")
+                    (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
+              ))
+            (const_int 1)))]
+  "TARGET_SIMD"
+  "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]"
+  [(set_attr "simd_type" "simd_sat_mul")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_sqdmull_lane<mode>_internal"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (ss_ashift:<VWIDE>
+            (mult:<VWIDE>
+              (sign_extend:<VWIDE>
+                (match_operand:SD_HSI 1 "register_operand" "w"))
+              (sign_extend:<VWIDE>
+                 (vec_select:<VEL>
+                  (match_operand:<VCON> 2 "register_operand" "<vwx>")
+                  (parallel [(match_operand:SI 3 "immediate_operand" "i")]))
+              ))
+            (const_int 1)))]
+  "TARGET_SIMD"
+  "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]"
+  [(set_attr "simd_type" "simd_sat_mul")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_sqdmull_lane<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:VSD_HSI 1 "register_operand" "w")
+   (match_operand:<VCON> 2 "register_operand" "<vwx>")
+   (match_operand:SI 3 "immediate_operand" "i")]
+  "TARGET_SIMD"
+{
+  aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCON>mode) / 2);
+  emit_insn (gen_aarch64_sqdmull_lane<mode>_internal (operands[0], operands[1],
+                                                     operands[2], operands[3]));
+  DONE;
+})
+
+(define_expand "aarch64_sqdmull_laneq<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:VD_HSI 1 "register_operand" "w")
+   (match_operand:<VCON> 2 "register_operand" "<vwx>")
+   (match_operand:SI 3 "immediate_operand" "i")]
+  "TARGET_SIMD"
+{
+  aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCON>mode));
+  emit_insn (gen_aarch64_sqdmull_lane<mode>_internal
+              (operands[0], operands[1], operands[2], operands[3]));
+  DONE;
+})
+
+;; vqdmull_n
+
+(define_insn "aarch64_sqdmull_n<mode>"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (ss_ashift:<VWIDE>
+            (mult:<VWIDE>
+              (sign_extend:<VWIDE>
+                (match_operand:VD_HSI 1 "register_operand" "w"))
+              (sign_extend:<VWIDE>
+                 (vec_duplicate:VD_HSI
+                   (match_operand:<VEL> 2 "register_operand" "w")))
+              )
+            (const_int 1)))]
+  "TARGET_SIMD"
+  "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[0]"
+  [(set_attr "simd_type" "simd_sat_mul")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; vqdmull2
+
+(define_insn "aarch64_sqdmull2<mode>_internal"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (ss_ashift:<VWIDE>
+            (mult:<VWIDE>
+              (sign_extend:<VWIDE>
+                (vec_select:<VHALF>
+                   (match_operand:VQ_HSI 1 "register_operand" "w")
+                   (match_operand:VQ_HSI 3 "vect_par_cnst_hi_half" "")))
+              (sign_extend:<VWIDE>
+                (vec_select:<VHALF>
+                   (match_operand:VQ_HSI 2 "register_operand" "w")
+                   (match_dup 3)))
+              )
+            (const_int 1)))]
+  "TARGET_SIMD"
+  "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+  [(set_attr "simd_type" "simd_sat_mul")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_sqdmull2<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:VQ_HSI 1 "register_operand" "w")
+   (match_operand:<VCON> 2 "register_operand" "w")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+  emit_insn (gen_aarch64_sqdmull2<mode>_internal (operands[0], operands[1],
+                                                 operands[2], p));
+  DONE;
+})
+
+;; vqdmull2_lane
+
+(define_insn "aarch64_sqdmull2_lane<mode>_internal"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (ss_ashift:<VWIDE>
+            (mult:<VWIDE>
+              (sign_extend:<VWIDE>
+                (vec_select:<VHALF>
+                   (match_operand:VQ_HSI 1 "register_operand" "w")
+                   (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" "")))
+              (sign_extend:<VWIDE>
+                 (vec_duplicate:<VHALF>
+                   (vec_select:<VEL>
+                    (match_operand:<VCON> 2 "register_operand" "<vwx>")
+                    (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
+              ))
+            (const_int 1)))]
+  "TARGET_SIMD"
+  "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]"
+  [(set_attr "simd_type" "simd_sat_mul")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_sqdmull2_lane<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:VQ_HSI 1 "register_operand" "w")
+   (match_operand:<VCON> 2 "register_operand" "<vwx>")
+   (match_operand:SI 3 "immediate_operand" "i")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+  aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode) / 2);
+  emit_insn (gen_aarch64_sqdmull2_lane<mode>_internal (operands[0], operands[1],
+                                                      operands[2], operands[3],
+                                                      p));
+  DONE;
+})
+
+(define_expand "aarch64_sqdmull2_laneq<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:VQ_HSI 1 "register_operand" "w")
+   (match_operand:<VCON> 2 "register_operand" "<vwx>")
+   (match_operand:SI 3 "immediate_operand" "i")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+  aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode));
+  emit_insn (gen_aarch64_sqdmull2_lane<mode>_internal (operands[0], operands[1],
+                                                      operands[2], operands[3],
+                                                      p));
+  DONE;
+})
+
+;; vqdmull2_n
+
+(define_insn "aarch64_sqdmull2_n<mode>_internal"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (ss_ashift:<VWIDE>
+            (mult:<VWIDE>
+              (sign_extend:<VWIDE>
+                (vec_select:<VHALF>
+                   (match_operand:VQ_HSI 1 "register_operand" "w")
+                   (match_operand:VQ_HSI 3 "vect_par_cnst_hi_half" "")))
+              (sign_extend:<VWIDE>
+                 (vec_duplicate:<VHALF>
+                   (match_operand:<VEL> 2 "register_operand" "w")))
+              )
+            (const_int 1)))]
+  "TARGET_SIMD"
+  "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[0]"
+  [(set_attr "simd_type" "simd_sat_mul")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_sqdmull2_n<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand" "=w")
+   (match_operand:VQ_HSI 1 "register_operand" "w")
+   (match_operand:<VEL> 2 "register_operand" "w")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+  emit_insn (gen_aarch64_sqdmull2_n<mode>_internal (operands[0], operands[1],
+                                                   operands[2], p));
+  DONE;
+})
+
+;; vshl
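+;; The shift count is taken from a vector register, one signed count per
+;; element; a negative count shifts that element right instead, so each
+;; <sur> variant covers both directions with a single pattern.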
+
+(define_insn "aarch64_<sur>shl<mode>"
+  [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
+        (unspec:VSDQ_I_DI
+         [(match_operand:VSDQ_I_DI 1 "register_operand" "w")
+           (match_operand:VSDQ_I_DI 2 "register_operand" "w")]
+         VSHL))]
+  "TARGET_SIMD"
+  "<sur>shl\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>";
+  [(set_attr "simd_type" "simd_shift")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+
+;; vqshl
+
+(define_insn "aarch64_<sur>q<r>shl<mode>"
+  [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
+        (unspec:VSDQ_I
+         [(match_operand:VSDQ_I 1 "register_operand" "w")
+           (match_operand:VSDQ_I 2 "register_operand" "w")]
+         VQSHL))]
+  "TARGET_SIMD"
+  "<sur>q<r>shl\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>";
+  [(set_attr "simd_type" "simd_sat_shift")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; vshl_n
+
+(define_expand "aarch64_sshl_n<mode>"
+  [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
+   (match_operand:VSDQ_I_DI 1 "register_operand" "w")
+   (match_operand:SI 2 "immediate_operand" "i")]
+  "TARGET_SIMD"
+{
+  emit_insn (gen_ashl<mode>3 (operands[0], operands[1], operands[2]));
+  DONE;
+})
+
+(define_expand "aarch64_ushl_n<mode>"
+  [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
+   (match_operand:VSDQ_I_DI 1 "register_operand" "w")
+   (match_operand:SI 2 "immediate_operand" "i")]
+  "TARGET_SIMD"
+{
+  emit_insn (gen_ashl<mode>3 (operands[0], operands[1], operands[2]));
+  DONE;
+})
+
+;; vshll_n
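+;; The sshll/ushll immediate range stops one short of the element width;
+;; shifting by exactly the element width is the separate shll instruction,
+;; hence the special case in the output code below.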
+
+(define_insn "aarch64_<sur>shll_n<mode>"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+       (unspec:<VWIDE> [(match_operand:VDW 1 "register_operand" "w")
+                        (match_operand:SI 2 "immediate_operand" "i")]
+                         VSHLL))]
+  "TARGET_SIMD"
+  "*
+  int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+  aarch64_simd_const_bounds (operands[2], 0, bit_width + 1);
+  if (INTVAL (operands[2]) == bit_width)
+  {
+    return \"shll\\t%0.<Vwtype>, %1.<Vtype>, %2\";
+  }
+  else {
+    return \"<sur>shll\\t%0.<Vwtype>, %1.<Vtype>, %2\";
+  }"
+  [(set_attr "simd_type" "simd_shift_imm")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; vshll_high_n
+
+(define_insn "aarch64_<sur>shll2_n<mode>"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+       (unspec:<VWIDE> [(match_operand:VQW 1 "register_operand" "w")
+                        (match_operand:SI 2 "immediate_operand" "i")]
+                         VSHLL))]
+  "TARGET_SIMD"
+  "*
+  int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+  aarch64_simd_const_bounds (operands[2], 0, bit_width + 1);
+  if (INTVAL (operands[2]) == bit_width)
+  {
+    return \"shll2\\t%0.<Vwtype>, %1.<Vtype>, %2\";
+  }
+  else {
+    return \"<sur>shll2\\t%0.<Vwtype>, %1.<Vtype>, %2\";
+  }"
+  [(set_attr "simd_type" "simd_shift_imm")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; vshr_n
+
+(define_expand "aarch64_sshr_n<mode>"
+  [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
+   (match_operand:VSDQ_I_DI 1 "register_operand" "w")
+   (match_operand:SI 2 "immediate_operand" "i")]
+  "TARGET_SIMD"
+{
+  emit_insn (gen_ashr<mode>3 (operands[0], operands[1], operands[2]));
+  DONE;
+})
+
+(define_expand "aarch64_ushr_n<mode>"
+  [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
+   (match_operand:VSDQ_I_DI 1 "register_operand" "w")
+   (match_operand:SI 2 "immediate_operand" "i")]
+  "TARGET_SIMD"
+{
+  emit_insn (gen_lshr<mode>3 (operands[0], operands[1], operands[2]));
+  DONE;
+})
+
+;; vrshr_n
+
+(define_insn "aarch64_<sur>shr_n<mode>"
+  [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
+        (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "w")
+                          (match_operand:SI 2 "immediate_operand" "i")]
+                         VRSHR_N))]
+  "TARGET_SIMD"
+  "*
+  int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+  aarch64_simd_const_bounds (operands[2], 1, bit_width + 1);
+  return \"<sur>shr\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2\";"
+  [(set_attr "simd_type" "simd_shift_imm")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; v(r)sra_n
+
+(define_insn "aarch64_<sur>sra_n<mode>"
+  [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
+       (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "0")
+                      (match_operand:VSDQ_I_DI 2 "register_operand" "w")
+                       (match_operand:SI 3 "immediate_operand" "i")]
+                      VSRA))]
+  "TARGET_SIMD"
+  "*
+  int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+  aarch64_simd_const_bounds (operands[3], 1, bit_width + 1);
+  return \"<sur>sra\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3\";"
+  [(set_attr "simd_type" "simd_shift_imm_acc")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; vs<lr>i_n
+
+(define_insn "aarch64_<sur>s<lr>i_n<mode>"
+  [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
+       (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "0")
+                      (match_operand:VSDQ_I_DI 2 "register_operand" "w")
+                       (match_operand:SI 3 "immediate_operand" "i")]
+                      VSLRI))]
+  "TARGET_SIMD"
+  "*
+  int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+  aarch64_simd_const_bounds (operands[3], 1 - <VSLRI:offsetlr>,
+                             bit_width - <VSLRI:offsetlr> + 1);
+  return \"s<lr>i\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3\";"
+  [(set_attr "simd_type" "simd_shift_imm")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; vqshl(u)
+
+(define_insn "aarch64_<sur>qshl<u>_n<mode>"
+  [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
+       (unspec:VSDQ_I [(match_operand:VSDQ_I 1 "register_operand" "w")
+                      (match_operand:SI 2 "immediate_operand" "i")]
+                      VQSHL_N))]
+  "TARGET_SIMD"
+  "*
+  int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+  aarch64_simd_const_bounds (operands[2], 0, bit_width);
+  return \"<sur>qshl<u>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2\";"
+  [(set_attr "simd_type" "simd_sat_shift_imm")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+
+;; vq(r)shr(u)n_n
+
+(define_insn "aarch64_<sur>q<r>shr<u>n_n<mode>"
+  [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
+        (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")
+                           (match_operand:SI 2 "immediate_operand" "i")]
+                          VQSHRN_N))]
+  "TARGET_SIMD"
+  "*
+  int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+  aarch64_simd_const_bounds (operands[2], 1, bit_width + 1);
+  return \"<sur>q<r>shr<u>n\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>, %2\";"
+  [(set_attr "simd_type" "simd_sat_shiftn_imm")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+
+;; cm(eq|ge|le|lt|gt)
+
+(define_insn "aarch64_cm<cmp><mode>"
+  [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w,w")
+        (unspec:<V_cmp_result>
+         [(match_operand:VSDQ_I_DI 1 "register_operand" "w,w")
+          (match_operand:VSDQ_I_DI 2 "aarch64_simd_reg_or_zero" "w,Z")]
+          VCMP_S))]
+  "TARGET_SIMD"
+  "@
+  cm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>
+  cm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, #0"
+  [(set_attr "simd_type" "simd_cmp")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; cm(hs|hi|tst)
+
+(define_insn "aarch64_cm<cmp><mode>"
+  [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w")
+        (unspec:<V_cmp_result>
+         [(match_operand:VSDQ_I_DI 1 "register_operand" "w")
+          (match_operand:VSDQ_I_DI 2 "register_operand" "w")]
+          VCMP_U))]
+  "TARGET_SIMD"
+  "cm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+  [(set_attr "simd_type" "simd_cmp")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; addp
+
+(define_insn "aarch64_addp<mode>"
+  [(set (match_operand:VD_BHSI 0 "register_operand" "=w")
+        (unspec:VD_BHSI
+          [(match_operand:VD_BHSI 1 "register_operand" "w")
+          (match_operand:VD_BHSI 2 "register_operand" "w")]
+          UNSPEC_ADDP))]
+  "TARGET_SIMD"
+  "addp\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+  [(set_attr "simd_type" "simd_add")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_addpdi"
+  [(set (match_operand:DI 0 "register_operand" "=w")
+        (unspec:DI
+          [(match_operand:V2DI 1 "register_operand" "w")]
+          UNSPEC_ADDP))]
+  "TARGET_SIMD"
+  "addp\t%d0, %1.2d"
+  [(set_attr "simd_type" "simd_add")
+   (set_attr "simd_mode" "DI")]
+)
+
+;; v(max|min)
+
+(define_expand "aarch64_<maxmin><mode>"
+ [(set (match_operand:VDQ_BHSI 0 "register_operand" "=w")
+       (MAXMIN:VDQ_BHSI (match_operand:VDQ_BHSI 1 "register_operand" "w")
+                       (match_operand:VDQ_BHSI 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+{
+  emit_insn (gen_<maxmin><mode>3 (operands[0], operands[1], operands[2]));
+  DONE;
+})
+
+
+(define_insn "aarch64_<fmaxmin><mode>"
+  [(set (match_operand:VDQF 0 "register_operand" "=w")
+        (unspec:VDQF [(match_operand:VDQF 1 "register_operand" "w")
+                     (match_operand:VDQF 2 "register_operand" "w")]
+                     FMAXMIN))]
+  "TARGET_SIMD"
+  "<fmaxmin>\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "simd_type" "simd_fminmax")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+;; sqrt
+
+(define_insn "sqrt<mode>2"
+  [(set (match_operand:VDQF 0 "register_operand" "=w")
+        (sqrt:VDQF (match_operand:VDQF 1 "register_operand" "w")))]
+  "TARGET_SIMD"
+  "fsqrt\\t%0.<Vtype>, %1.<Vtype>"
+  [(set_attr "simd_type" "simd_fsqrt")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_sqrt<mode>"
+  [(match_operand:VDQF 0 "register_operand" "=w")
+   (match_operand:VDQF 1 "register_operand" "w")]
+  "TARGET_SIMD"
+{
+  emit_insn (gen_sqrt<mode>2 (operands[0], operands[1]));
+  DONE;
+})
+
+
+;; Patterns for vector struct loads and stores.
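+;; OI, CI and XI are opaque integer modes wide enough to hold a list of
+;; two, three or four Q registers respectively; the ld2/ld3/ld4 and
+;; st2/st3/st4 patterns below move whole register lists at a time.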
+
+(define_insn "vec_load_lanesoi<mode>"
+  [(set (match_operand:OI 0 "register_operand" "=w")
+       (unspec:OI [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")
+                   (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+                  UNSPEC_LD2))]
+  "TARGET_SIMD"
+  "ld2\\t{%S0.<Vtype> - %T0.<Vtype>}, %1"
+  [(set_attr "simd_type" "simd_load2")
+   (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "vec_store_lanesoi<mode>"
+  [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv")
+       (unspec:OI [(match_operand:OI 1 "register_operand" "w")
+                    (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+                   UNSPEC_ST2))]
+  "TARGET_SIMD"
+  "st2\\t{%S1.<Vtype> - %T1.<Vtype>}, %0"
+  [(set_attr "simd_type" "simd_store2")
+   (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "vec_load_lanesci<mode>"
+  [(set (match_operand:CI 0 "register_operand" "=w")
+       (unspec:CI [(match_operand:CI 1 "aarch64_simd_struct_operand" "Utv")
+                   (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+                  UNSPEC_LD3))]
+  "TARGET_SIMD"
+  "ld3\\t{%S0.<Vtype> - %U0.<Vtype>}, %1"
+  [(set_attr "simd_type" "simd_load3")
+   (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "vec_store_lanesci<mode>"
+  [(set (match_operand:CI 0 "aarch64_simd_struct_operand" "=Utv")
+       (unspec:CI [(match_operand:CI 1 "register_operand" "w")
+                    (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+                   UNSPEC_ST3))]
+  "TARGET_SIMD"
+  "st3\\t{%S1.<Vtype> - %U1.<Vtype>}, %0"
+  [(set_attr "simd_type" "simd_store3")
+   (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "vec_load_lanesxi<mode>"
+  [(set (match_operand:XI 0 "register_operand" "=w")
+       (unspec:XI [(match_operand:XI 1 "aarch64_simd_struct_operand" "Utv")
+                   (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+                  UNSPEC_LD4))]
+  "TARGET_SIMD"
+  "ld4\\t{%S0.<Vtype> - %V0.<Vtype>}, %1"
+  [(set_attr "simd_type" "simd_load4")
+   (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "vec_store_lanesxi<mode>"
+  [(set (match_operand:XI 0 "aarch64_simd_struct_operand" "=Utv")
+       (unspec:XI [(match_operand:XI 1 "register_operand" "w")
+                    (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+                   UNSPEC_ST4))]
+  "TARGET_SIMD"
+  "st4\\t{%S1.<Vtype> - %V1.<Vtype>}, %0"
+  [(set_attr "simd_type" "simd_store4")
+   (set_attr "simd_mode" "<MODE>")])
+
+;; Reload patterns for AdvSIMD register list operands.
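+;; A register-to-register move of a list has no single instruction, so the
+;; mov pattern returns "#" for that alternative and the splits below break
+;; it into one 128-bit (TFmode) copy per register after reload, with
+;; aarch64_simd_disambiguate_copy ordering the copies to cope with
+;; overlapping source and destination lists.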
+
+(define_expand "mov<mode>"
+  [(set (match_operand:VSTRUCT 0 "aarch64_simd_nonimmediate_operand" "")
+       (match_operand:VSTRUCT 1 "aarch64_simd_general_operand" ""))]
+  "TARGET_SIMD"
+{
+  if (can_create_pseudo_p ())
+    {
+      if (GET_CODE (operands[0]) != REG)
+       operands[1] = force_reg (<MODE>mode, operands[1]);
+    }
+})
+
+(define_insn "*aarch64_mov<mode>"
+  [(set (match_operand:VSTRUCT 0 "aarch64_simd_nonimmediate_operand" "=w,Utv,w")
+       (match_operand:VSTRUCT 1 "aarch64_simd_general_operand" " w,w,Utv"))]
+  "TARGET_SIMD
+   && (register_operand (operands[0], <MODE>mode)
+       || register_operand (operands[1], <MODE>mode))"
+
+{
+  switch (which_alternative)
+    {
+    case 0: return "#";
+    case 1: return "st1\\t{%S1.16b - %<Vendreg>1.16b}, %0";
+    case 2: return "ld1\\t{%S0.16b - %<Vendreg>0.16b}, %1";
+    default: gcc_unreachable ();
+    }
+}
+  [(set_attr "simd_type" "simd_move,simd_store<nregs>,simd_load<nregs>")
+   (set (attr "length") (symbol_ref "aarch64_simd_attr_length_move (insn)"))
+   (set_attr "simd_mode" "<MODE>")])
+
+(define_split
+  [(set (match_operand:OI 0 "register_operand" "")
+       (match_operand:OI 1 "register_operand" ""))]
+  "TARGET_SIMD && reload_completed"
+  [(set (match_dup 0) (match_dup 1))
+   (set (match_dup 2) (match_dup 3))]
+{
+  int rdest = REGNO (operands[0]);
+  int rsrc = REGNO (operands[1]);
+  rtx dest[2], src[2];
+
+  dest[0] = gen_rtx_REG (TFmode, rdest);
+  src[0] = gen_rtx_REG (TFmode, rsrc);
+  dest[1] = gen_rtx_REG (TFmode, rdest + 1);
+  src[1] = gen_rtx_REG (TFmode, rsrc + 1);
+
+  aarch64_simd_disambiguate_copy (operands, dest, src, 2);
+})
+
+(define_split
+  [(set (match_operand:CI 0 "register_operand" "")
+       (match_operand:CI 1 "register_operand" ""))]
+  "TARGET_SIMD && reload_completed"
+  [(set (match_dup 0) (match_dup 1))
+   (set (match_dup 2) (match_dup 3))
+   (set (match_dup 4) (match_dup 5))]
+{
+  int rdest = REGNO (operands[0]);
+  int rsrc = REGNO (operands[1]);
+  rtx dest[3], src[3];
+
+  dest[0] = gen_rtx_REG (TFmode, rdest);
+  src[0] = gen_rtx_REG (TFmode, rsrc);
+  dest[1] = gen_rtx_REG (TFmode, rdest + 1);
+  src[1] = gen_rtx_REG (TFmode, rsrc + 1);
+  dest[2] = gen_rtx_REG (TFmode, rdest + 2);
+  src[2] = gen_rtx_REG (TFmode, rsrc + 2);
+
+  aarch64_simd_disambiguate_copy (operands, dest, src, 3);
+})
+
+(define_split
+  [(set (match_operand:XI 0 "register_operand" "")
+       (match_operand:XI 1 "register_operand" ""))]
+  "TARGET_SIMD && reload_completed"
+  [(set (match_dup 0) (match_dup 1))
+   (set (match_dup 2) (match_dup 3))
+   (set (match_dup 4) (match_dup 5))
+   (set (match_dup 6) (match_dup 7))]
+{
+  int rdest = REGNO (operands[0]);
+  int rsrc = REGNO (operands[1]);
+  rtx dest[4], src[4];
+
+  dest[0] = gen_rtx_REG (TFmode, rdest);
+  src[0] = gen_rtx_REG (TFmode, rsrc);
+  dest[1] = gen_rtx_REG (TFmode, rdest + 1);
+  src[1] = gen_rtx_REG (TFmode, rsrc + 1);
+  dest[2] = gen_rtx_REG (TFmode, rdest + 2);
+  src[2] = gen_rtx_REG (TFmode, rsrc + 2);
+  dest[3] = gen_rtx_REG (TFmode, rdest + 3);
+  src[3] = gen_rtx_REG (TFmode, rsrc + 3);
+
+  aarch64_simd_disambiguate_copy (operands, dest, src, 4);
+})
+
+(define_insn "aarch64_ld2<mode>_dreg"
+  [(set (match_operand:OI 0 "register_operand" "=w")
+       (subreg:OI
+         (vec_concat:<VRL2>
+           (vec_concat:<VDBL>
+            (unspec:VD [(match_operand:TI 1 "aarch64_simd_struct_operand" "Utv")]
+                       UNSPEC_LD2)
+            (vec_duplicate:VD (const_int 0)))
+           (vec_concat:<VDBL>
+            (unspec:VD [(match_dup 1)]
+                       UNSPEC_LD2)
+            (vec_duplicate:VD (const_int 0)))) 0))]
+  "TARGET_SIMD"
+  "ld2\\t{%S0.<Vtype> - %T0.<Vtype>}, %1"
+  [(set_attr "simd_type" "simd_load2")
+   (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "aarch64_ld2<mode>_dreg"
+  [(set (match_operand:OI 0 "register_operand" "=w")
+       (subreg:OI
+         (vec_concat:<VRL2>
+           (vec_concat:<VDBL>
+            (unspec:DX [(match_operand:TI 1 "aarch64_simd_struct_operand" "Utv")]
+                       UNSPEC_LD2)
+            (const_int 0))
+           (vec_concat:<VDBL>
+            (unspec:DX [(match_dup 1)]
+                       UNSPEC_LD2)
+            (const_int 0))) 0))]
+  "TARGET_SIMD"
+  "ld1\\t{%S0.1d - %T0.1d}, %1"
+  [(set_attr "simd_type" "simd_load2")
+   (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "aarch64_ld3<mode>_dreg"
+  [(set (match_operand:CI 0 "register_operand" "=w")
+       (subreg:CI
+        (vec_concat:<VRL3>
+         (vec_concat:<VRL2>
+           (vec_concat:<VDBL>
+            (unspec:VD [(match_operand:EI 1 "aarch64_simd_struct_operand" "Utv")]
+                       UNSPEC_LD3)
+            (vec_duplicate:VD (const_int 0)))
+           (vec_concat:<VDBL>
+            (unspec:VD [(match_dup 1)]
+                       UNSPEC_LD3)
+            (vec_duplicate:VD (const_int 0))))
+         (vec_concat:<VDBL>
+            (unspec:VD [(match_dup 1)]
+                       UNSPEC_LD3)
+            (vec_duplicate:VD (const_int 0)))) 0))]
+  "TARGET_SIMD"
+  "ld3\\t{%S0.<Vtype> - %U0.<Vtype>}, %1"
+  [(set_attr "simd_type" "simd_load3")
+   (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "aarch64_ld3<mode>_dreg"
+  [(set (match_operand:CI 0 "register_operand" "=w")
+       (subreg:CI
+        (vec_concat:<VRL3>
+         (vec_concat:<VRL2>
+           (vec_concat:<VDBL>
+            (unspec:DX [(match_operand:EI 1 "aarch64_simd_struct_operand" "Utv")]
+                       UNSPEC_LD3)
+            (const_int 0))
+           (vec_concat:<VDBL>
+            (unspec:DX [(match_dup 1)]
+                       UNSPEC_LD3)
+            (const_int 0)))
+         (vec_concat:<VDBL>
+            (unspec:DX [(match_dup 1)]
+                       UNSPEC_LD3)
+            (const_int 0))) 0))]
+  "TARGET_SIMD"
+  "ld1\\t{%S0.1d - %U0.1d}, %1"
+  [(set_attr "simd_type" "simd_load3")
+   (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "aarch64_ld4<mode>_dreg"
+  [(set (match_operand:XI 0 "register_operand" "=w")
+       (subreg:XI
+        (vec_concat:<VRL4>
+          (vec_concat:<VRL2>
+            (vec_concat:<VDBL>
+              (unspec:VD [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")]
+                         UNSPEC_LD4)
+              (vec_duplicate:VD (const_int 0)))
+             (vec_concat:<VDBL>
+               (unspec:VD [(match_dup 1)]
+                       UNSPEC_LD4)
+               (vec_duplicate:VD (const_int 0))))
+          (vec_concat:<VRL2>
+            (vec_concat:<VDBL>
+              (unspec:VD [(match_dup 1)]
+                       UNSPEC_LD4)
+              (vec_duplicate:VD (const_int 0)))
+            (vec_concat:<VDBL>
+              (unspec:VD [(match_dup 1)]
+                       UNSPEC_LD4)
+              (vec_duplicate:VD (const_int 0))))) 0))]
+  "TARGET_SIMD"
+  "ld4\\t{%S0.<Vtype> - %V0.<Vtype>}, %1"
+  [(set_attr "simd_type" "simd_load4")
+   (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "aarch64_ld4<mode>_dreg"
+  [(set (match_operand:XI 0 "register_operand" "=w")
+       (subreg:XI
+        (vec_concat:<VRL4>
+          (vec_concat:<VRL2>
+            (vec_concat:<VDBL>
+              (unspec:DX [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")]
+                         UNSPEC_LD4)
+              (const_int 0))
+             (vec_concat:<VDBL>
+               (unspec:DX [(match_dup 1)]
+                       UNSPEC_LD4)
+               (const_int 0)))
+          (vec_concat:<VRL2>
+            (vec_concat:<VDBL>
+              (unspec:DX [(match_dup 1)]
+                       UNSPEC_LD4)
+              (const_int 0))
+            (vec_concat:<VDBL>
+              (unspec:DX [(match_dup 1)]
+                       UNSPEC_LD4)
+              (const_int 0)))) 0))]
+  "TARGET_SIMD"
+  "ld1\\t{%S0.1d - %V0.1d}, %1"
+  [(set_attr "simd_type" "simd_load4")
+   (set_attr "simd_mode" "<MODE>")])
+
+(define_expand "aarch64_ld<VSTRUCT:nregs><VDC:mode>"
+ [(match_operand:VSTRUCT 0 "register_operand" "=w")
+  (match_operand:DI 1 "register_operand" "r")
+  (unspec:VDC [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+  "TARGET_SIMD"
+{
+  enum machine_mode mode = <VSTRUCT:VSTRUCT_DREG>mode;
+  rtx mem = gen_rtx_MEM (mode, operands[1]);
+
+  emit_insn (gen_aarch64_ld<VSTRUCT:nregs><VDC:mode>_dreg (operands[0], mem));
+  DONE;
+})
+
+(define_expand "aarch64_ld<VSTRUCT:nregs><VQ:mode>"
+ [(match_operand:VSTRUCT 0 "register_operand" "=w")
+  (match_operand:DI 1 "register_operand" "r")
+  (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+  "TARGET_SIMD"
+{
+  enum machine_mode mode = <VSTRUCT:MODE>mode;
+  rtx mem = gen_rtx_MEM (mode, operands[1]);
+
+  emit_insn (gen_vec_load_lanes<VSTRUCT:mode><VQ:mode> (operands[0], mem));
+  DONE;
+})
+
+;; Expanders for builtins to extract vector registers from large
+;; opaque integer modes.
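+;; Operand 2 selects which register of the list to return: each 128-bit
+;; slot lives at byte offset part * 16 within the opaque mode, and for a
+;; D-register result the low half of that slot is then taken.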
+
+;; D-register list.
+
+(define_expand "aarch64_get_dreg<VSTRUCT:mode><VDC:mode>"
+ [(match_operand:VDC 0 "register_operand" "=w")
+  (match_operand:VSTRUCT 1 "register_operand" "w")
+  (match_operand:SI 2 "immediate_operand" "i")]
+  "TARGET_SIMD"
+{
+  int part = INTVAL (operands[2]);
+  rtx temp = gen_reg_rtx (<VDC:VDBL>mode);
+  int offset = part * 16;
+
+  emit_move_insn (temp, gen_rtx_SUBREG (<VDC:VDBL>mode, operands[1], offset));
+  emit_move_insn (operands[0], gen_lowpart (<VDC:MODE>mode, temp));
+  DONE;
+})
+
+;; Q-register list.
+
+(define_expand "aarch64_get_qreg<VSTRUCT:mode><VQ:mode>"
+ [(match_operand:VQ 0 "register_operand" "=w")
+  (match_operand:VSTRUCT 1 "register_operand" "w")
+  (match_operand:SI 2 "immediate_operand" "i")]
+  "TARGET_SIMD"
+{
+  int part = INTVAL (operands[2]);
+  int offset = part * 16;
+
+  emit_move_insn (operands[0],
+                 gen_rtx_SUBREG (<VQ:MODE>mode, operands[1], offset));
+  DONE;
+})
+
+;; Permuted-store expanders for neon intrinsics.
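+;; As with the loads above, each st2/st3/st4 pattern has a DX (64-bit
+;; scalar element) twin that falls back to an st1 of the register list,
+;; since the multi-structure stores have no .1d arrangement.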
+
+(define_insn "aarch64_st2<mode>_dreg"
+  [(set (match_operand:TI 0 "aarch64_simd_struct_operand" "=Utv")
+       (unspec:TI [(match_operand:OI 1 "register_operand" "w")
+                    (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+                   UNSPEC_ST2))]
+  "TARGET_SIMD"
+  "st2\\t{%S1.<Vtype> - %T1.<Vtype>}, %0"
+  [(set_attr "simd_type" "simd_store2")
+   (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "aarch64_st2<mode>_dreg"
+  [(set (match_operand:TI 0 "aarch64_simd_struct_operand" "=Utv")
+       (unspec:TI [(match_operand:OI 1 "register_operand" "w")
+                    (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+                   UNSPEC_ST2))]
+  "TARGET_SIMD"
+  "st1\\t{%S1.1d - %T1.1d}, %0"
+  [(set_attr "simd_type" "simd_store2")
+   (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "aarch64_st3<mode>_dreg"
+  [(set (match_operand:EI 0 "aarch64_simd_struct_operand" "=Utv")
+       (unspec:EI [(match_operand:CI 1 "register_operand" "w")
+                    (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+                   UNSPEC_ST3))]
+  "TARGET_SIMD"
+  "st3\\t{%S1.<Vtype> - %U1.<Vtype>}, %0"
+  [(set_attr "simd_type" "simd_store3")
+   (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "aarch64_st3<mode>_dreg"
+  [(set (match_operand:EI 0 "aarch64_simd_struct_operand" "=Utv")
+       (unspec:EI [(match_operand:CI 1 "register_operand" "w")
+                    (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+                   UNSPEC_ST3))]
+  "TARGET_SIMD"
+  "st1\\t{%S1.1d - %U1.1d}, %0"
+  [(set_attr "simd_type" "simd_store3")
+   (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "aarch64_st4<mode>_dreg"
+  [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv")
+       (unspec:OI [(match_operand:XI 1 "register_operand" "w")
+                    (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+                   UNSPEC_ST4))]
+  "TARGET_SIMD"
+  "st4\\t{%S1.<Vtype> - %V1.<Vtype>}, %0"
+  [(set_attr "simd_type" "simd_store4")
+   (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "aarch64_st4<mode>_dreg"
+  [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv")
+       (unspec:OI [(match_operand:XI 1 "register_operand" "w")
+                    (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+                   UNSPEC_ST4))]
+  "TARGET_SIMD"
+  "st1\\t{%S1.1d - %V1.1d}, %0"
+  [(set_attr "simd_type" "simd_store4")
+   (set_attr "simd_mode" "<MODE>")])
+
+(define_expand "aarch64_st<VSTRUCT:nregs><VDC:mode>"
+ [(match_operand:DI 0 "register_operand" "r")
+  (match_operand:VSTRUCT 1 "register_operand" "w")
+  (unspec:VDC [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+  "TARGET_SIMD"
+{
+  enum machine_mode mode = <VSTRUCT:VSTRUCT_DREG>mode;
+  rtx mem = gen_rtx_MEM (mode, operands[0]);
+
+  emit_insn (gen_aarch64_st<VSTRUCT:nregs><VDC:mode>_dreg (mem, operands[1]));
+  DONE;
+})
+
+(define_expand "aarch64_st<VSTRUCT:nregs><VQ:mode>"
+ [(match_operand:DI 0 "register_operand" "r")
+  (match_operand:VSTRUCT 1 "register_operand" "w")
+  (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+  "TARGET_SIMD"
+{
+  enum machine_mode mode = <VSTRUCT:MODE>mode;
+  rtx mem = gen_rtx_MEM (mode, operands[0]);
+
+  emit_insn (gen_vec_store_lanes<VSTRUCT:mode><VQ:mode> (mem, operands[1]));
+  DONE;
+})
+
+;; Expander for builtins to insert vector registers into large
+;; opaque integer modes.
+
+;; Q-register list.  We don't need a D-reg inserter as we zero
+;; extend them in arm_neon.h and insert the resulting Q-regs.
+
+(define_expand "aarch64_set_qreg<VSTRUCT:mode><VQ:mode>"
+ [(match_operand:VSTRUCT 0 "register_operand" "+w")
+  (match_operand:VSTRUCT 1 "register_operand" "0")
+  (match_operand:VQ 2 "register_operand" "w")
+  (match_operand:SI 3 "immediate_operand" "i")]
+  "TARGET_SIMD"
+{
+  int part = INTVAL (operands[3]);
+  int offset = part * 16;
+
+  emit_move_insn (operands[0], operands[1]);
+  emit_move_insn (gen_rtx_SUBREG (<VQ:MODE>mode, operands[0], offset),
+                 operands[2]);
+  DONE;
+})
+
diff --git a/gcc/config/aarch64/aarch64-tune.md b/gcc/config/aarch64/aarch64-tune.md
new file mode 100644 (file)
index 0000000..a654a91
--- /dev/null
@@ -0,0 +1,5 @@
+;; -*- buffer-read-only: t -*-
+;; Generated automatically by gentune.sh from aarch64-cores.def
+(define_attr "tune"
+       "large,small"
+       (const (symbol_ref "((enum attr_tune) aarch64_tune)")))
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
new file mode 100644 (file)
index 0000000..1bc0c8a
--- /dev/null
@@ -0,0 +1,7011 @@
+/* Machine description for AArch64 architecture.
+   Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+   Contributed by ARM Ltd.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with GCC; see the file COPYING3.  If not see
+   <http://www.gnu.org/licenses/>.  */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "insn-codes.h"
+#include "rtl.h"
+#include "insn-attr.h"
+#include "tree.h"
+#include "regs.h"
+#include "df.h"
+#include "hard-reg-set.h"
+#include "output.h"
+#include "expr.h"
+#include "reload.h"
+#include "toplev.h"
+#include "target.h"
+#include "target-def.h"
+#include "targhooks.h"
+#include "ggc.h"
+#include "function.h"
+#include "tm_p.h"
+#include "recog.h"
+#include "langhooks.h"
+#include "diagnostic-core.h"
+#include "gimple.h"
+#include "optabs.h"
+#include "dwarf2.h"
+
+/* Classifies an address.
+
+   ADDRESS_REG_IMM
+       A simple base register plus immediate offset.
+
+   ADDRESS_REG_WB
+       A base register indexed by immediate offset with writeback.
+
+   ADDRESS_REG_REG
+       A base register indexed by (optionally scaled) register.
+
+   ADDRESS_REG_UXTW
+       A base register indexed by (optionally scaled) zero-extended register.
+
+   ADDRESS_REG_SXTW
+       A base register indexed by (optionally scaled) sign-extended register.
+
+   ADDRESS_LO_SUM
+       A LO_SUM rtx with a base register and "LO12" symbol relocation.
+
+   ADDRESS_SYMBOLIC
+       A constant symbolic address, in pc-relative literal pool.  */
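+/* For reference, these classes correspond roughly to the assembly forms
+   [Xn, #imm]; [Xn, #imm]! or [Xn], #imm; [Xn, Xm{, lsl #s}];
+   [Xn, Wm, uxtw {#s}]; [Xn, Wm, sxtw {#s}]; [Xn, #:lo12:sym]; and a
+   pc-relative literal load, in the order listed above.  */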
+
+enum aarch64_address_type {
+  ADDRESS_REG_IMM,
+  ADDRESS_REG_WB,
+  ADDRESS_REG_REG,
+  ADDRESS_REG_UXTW,
+  ADDRESS_REG_SXTW,
+  ADDRESS_LO_SUM,
+  ADDRESS_SYMBOLIC
+};
+
+struct aarch64_address_info {
+  enum aarch64_address_type type;
+  rtx base;
+  rtx offset;
+  int shift;
+  enum aarch64_symbol_type symbol_type;
+};
+
+/* The current code model.  */
+enum aarch64_code_model aarch64_cmodel;
+
+#ifdef HAVE_AS_TLS
+#undef TARGET_HAVE_TLS
+#define TARGET_HAVE_TLS 1
+#endif
+
+static bool aarch64_composite_type_p (const_tree, enum machine_mode);
+static bool aarch64_vfp_is_call_or_return_candidate (enum machine_mode,
+                                                    const_tree,
+                                                    enum machine_mode *, int *,
+                                                    bool *);
+static void aarch64_elf_asm_constructor (rtx, int) ATTRIBUTE_UNUSED;
+static void aarch64_elf_asm_destructor (rtx, int) ATTRIBUTE_UNUSED;
+static rtx aarch64_load_tp (rtx);
+static void aarch64_override_options_after_change (void);
+static int aarch64_simd_valid_immediate (rtx, enum machine_mode, int, rtx *,
+                                        int *, unsigned char *, int *, int *);
+static bool aarch64_vector_mode_supported_p (enum machine_mode);
+static unsigned bit_count (unsigned HOST_WIDE_INT);
+static bool aarch64_const_vec_all_same_int_p (rtx,
+                                             HOST_WIDE_INT, HOST_WIDE_INT);
+
+/* The processor for which instructions should be scheduled.  */
+enum aarch64_processor aarch64_tune = generic;
+
+/* The current tuning set.  */
+const struct tune_params *aarch64_tune_params;
+
+/* Mask to specify which instructions we are allowed to generate.  */
+unsigned long aarch64_isa_flags = 0;
+
+/* Mask to specify which instruction scheduling options should be used.  */
+unsigned long aarch64_tune_flags = 0;
+
+/* Tuning parameters.  */
+
+#if HAVE_DESIGNATED_INITIALIZERS
+#define NAMED_PARAM(NAME, VAL) .NAME = (VAL)
+#else
+#define NAMED_PARAM(NAME, VAL) (VAL)
+#endif
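+/* NAMED_PARAM lets the tables below use C99 designated initializers where
+   the host compiler supports them, falling back to positional initializers
+   otherwise (so the field order must match the structure definitions).  */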
+
+#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
+__extension__
+#endif
+static const struct cpu_rtx_cost_table generic_rtx_cost_table =
+{
+  NAMED_PARAM (memory_load, COSTS_N_INSNS (1)),
+  NAMED_PARAM (memory_store, COSTS_N_INSNS (0)),
+  NAMED_PARAM (register_shift, COSTS_N_INSNS (1)),
+  NAMED_PARAM (int_divide, COSTS_N_INSNS (6)),
+  NAMED_PARAM (float_divide, COSTS_N_INSNS (2)),
+  NAMED_PARAM (double_divide, COSTS_N_INSNS (6)),
+  NAMED_PARAM (int_multiply, COSTS_N_INSNS (1)),
+  NAMED_PARAM (int_multiply_extend, COSTS_N_INSNS (1)),
+  NAMED_PARAM (int_multiply_add, COSTS_N_INSNS (1)),
+  NAMED_PARAM (int_multiply_extend_add, COSTS_N_INSNS (1)),
+  NAMED_PARAM (float_multiply, COSTS_N_INSNS (0)),
+  NAMED_PARAM (double_multiply, COSTS_N_INSNS (1))
+};
+
+#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
+__extension__
+#endif
+static const struct cpu_addrcost_table generic_addrcost_table =
+{
+  NAMED_PARAM (pre_modify, 0),
+  NAMED_PARAM (post_modify, 0),
+  NAMED_PARAM (register_offset, 0),
+  NAMED_PARAM (register_extend, 0),
+  NAMED_PARAM (imm_offset, 0)
+};
+
+#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
+__extension__
+#endif
+static const struct cpu_regmove_cost generic_regmove_cost =
+{
+  NAMED_PARAM (GP2GP, 1),
+  NAMED_PARAM (GP2FP, 2),
+  NAMED_PARAM (FP2GP, 2),
+  /* We currently do not provide direct support for TFmode Q->Q move.
+     Therefore we need to raise the cost above 2 in order to have
+     reload handle the situation.  */
+  NAMED_PARAM (FP2FP, 4)
+};
+
+#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
+__extension__
+#endif
+static const struct tune_params generic_tunings =
+{
+  &generic_rtx_cost_table,
+  &generic_addrcost_table,
+  &generic_regmove_cost,
+  NAMED_PARAM (memmov_cost, 4)
+};
+
+/* A processor implementing AArch64.  */
+struct processor
+{
+  const char *const name;
+  enum aarch64_processor core;
+  const char *arch;
+  const unsigned long flags;
+  const struct tune_params *const tune;
+};
+
+/* Processor cores implementing AArch64.  */
+static const struct processor all_cores[] =
+{
+#define AARCH64_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
+  {NAME, IDENT, #ARCH, FLAGS | AARCH64_FL_FOR_ARCH##ARCH, &COSTS##_tunings},
+#include "aarch64-cores.def"
+#undef AARCH64_CORE
+  {"generic", generic, "8", AARCH64_FL_FPSIMD | AARCH64_FL_FOR_ARCH8, &generic_tunings},
+  {NULL, aarch64_none, NULL, 0, NULL}
+};
+
+/* Architectures implementing AArch64.  */
+static const struct processor all_architectures[] =
+{
+#define AARCH64_ARCH(NAME, CORE, ARCH, FLAGS) \
+  {NAME, CORE, #ARCH, FLAGS, NULL},
+#include "aarch64-arches.def"
+#undef AARCH64_ARCH
+  {"generic", generic, "8", AARCH64_FL_FOR_ARCH8, NULL},
+  {NULL, aarch64_none, NULL, 0, NULL}
+};
+
+/* Target specification.  These are populated as command-line arguments
+   are processed, or NULL if not specified.  */
+static const struct processor *selected_arch;
+static const struct processor *selected_cpu;
+static const struct processor *selected_tune;
+
+#define AARCH64_CPU_DEFAULT_FLAGS ((selected_cpu) ? selected_cpu->flags : 0)
+
+/* An ISA extension in the co-processor and main instruction set space.  */
+struct aarch64_option_extension
+{
+  const char *const name;
+  const unsigned long flags_on;
+  const unsigned long flags_off;
+};
+
+/* ISA extensions in AArch64.  */
+static const struct aarch64_option_extension all_extensions[] =
+{
+#define AARCH64_OPT_EXTENSION(NAME, FLAGS_ON, FLAGS_OFF) \
+  {NAME, FLAGS_ON, FLAGS_OFF},
+#include "aarch64-option-extensions.def"
+#undef AARCH64_OPT_EXTENSION
+  {NULL, 0, 0}
+};
+
+/* Used to track the size of an address when generating a pre/post
+   increment address.  */
+static enum machine_mode aarch64_memory_reference_mode;
+
+/* Used to force GTY into this file.  */
+static GTY(()) int gty_dummy;
+
+/* A table of valid AArch64 "bitmask immediate" values for
+   logical instructions.  */
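+/* Every entry is a 64-bit value whose set bits form a replicated, rotated
+   contiguous run, which is exactly the set of values the logical
+   instructions accept as an immediate; AARCH64_NUM_BITMASKS counts them.  */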
+
+#define AARCH64_NUM_BITMASKS  5334
+static unsigned HOST_WIDE_INT aarch64_bitmasks[AARCH64_NUM_BITMASKS];
+
+/* Did we set flag_omit_frame_pointer just so
+   aarch64_frame_pointer_required would be called? */
+static bool faked_omit_frame_pointer;
+
+typedef enum aarch64_cond_code
+{
+  AARCH64_EQ = 0, AARCH64_NE, AARCH64_CS, AARCH64_CC, AARCH64_MI, AARCH64_PL,
+  AARCH64_VS, AARCH64_VC, AARCH64_HI, AARCH64_LS, AARCH64_GE, AARCH64_LT,
+  AARCH64_GT, AARCH64_LE, AARCH64_AL, AARCH64_NV
+}
+aarch64_cc;
+
+#define AARCH64_INVERSE_CONDITION_CODE(X) ((aarch64_cc) (((int) X) ^ 1))
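+/* This works because the codes above are laid out in inverse pairs
+   (EQ/NE, CS/CC, MI/PL, ...), so flipping the low bit of a condition
+   code yields its inverse.  */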
+
+/* The condition codes of the processor, and the inverse function.  */
+static const char * const aarch64_condition_codes[] =
+{
+  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
+};
+
+/* Provide a mapping from gcc register numbers to dwarf register numbers.  */
+unsigned
+aarch64_dbx_register_number (unsigned regno)
+{
+   if (GP_REGNUM_P (regno))
+     return AARCH64_DWARF_R0 + regno - R0_REGNUM;
+   else if (regno == SP_REGNUM)
+     return AARCH64_DWARF_SP;
+   else if (FP_REGNUM_P (regno))
+     return AARCH64_DWARF_V0 + regno - V0_REGNUM;
+
+   /* Return values >= DWARF_FRAME_REGISTERS indicate that there is no
+      equivalent DWARF register.  */
+   return DWARF_FRAME_REGISTERS;
+}
+
+/* Return TRUE if MODE is any of the large INT modes.  */
+static bool
+aarch64_vect_struct_mode_p (enum machine_mode mode)
+{
+  return mode == OImode || mode == CImode || mode == XImode;
+}
+
+/* Return TRUE if MODE is any of the vector modes.  */
+static bool
+aarch64_vector_mode_p (enum machine_mode mode)
+{
+  return aarch64_vector_mode_supported_p (mode)
+        || aarch64_vect_struct_mode_p (mode);
+}
+
+/* Implement target hook TARGET_ARRAY_MODE_SUPPORTED_P.  */
+static bool
+aarch64_array_mode_supported_p (enum machine_mode mode,
+                               unsigned HOST_WIDE_INT nelems)
+{
+  if (TARGET_SIMD
+      && AARCH64_VALID_SIMD_QREG_MODE (mode)
+      && (nelems >= 2 && nelems <= 4))
+    return true;
+
+  return false;
+}
+
+/* Implement HARD_REGNO_NREGS.  */
+
+int
+aarch64_hard_regno_nregs (unsigned regno, enum machine_mode mode)
+{
+  switch (aarch64_regno_regclass (regno))
+    {
+    case FP_REGS:
+    case FP_LO_REGS:
+      return (GET_MODE_SIZE (mode) + UNITS_PER_VREG - 1) / UNITS_PER_VREG;
+    default:
+      return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+    }
+  gcc_unreachable ();
+}
+
+/* Implement HARD_REGNO_MODE_OK.  */
+
+int
+aarch64_hard_regno_mode_ok (unsigned regno, enum machine_mode mode)
+{
+  if (GET_MODE_CLASS (mode) == MODE_CC)
+    return regno == CC_REGNUM;
+
+  if (regno == SP_REGNUM || regno == FRAME_POINTER_REGNUM
+      || regno == ARG_POINTER_REGNUM)
+    return mode == Pmode;
+
+  if (GP_REGNUM_P (regno) && ! aarch64_vect_struct_mode_p (mode))
+    return 1;
+
+  if (FP_REGNUM_P (regno))
+    {
+      if (aarch64_vect_struct_mode_p (mode))
+       return
+         (regno + aarch64_hard_regno_nregs (regno, mode) - 1) <= V31_REGNUM;
+      else
+       return 1;
+    }
+
+  return 0;
+}
+
+/* Return true if calls to DECL should be treated as
+   long-calls (i.e. called via a register).  */
+static bool
+aarch64_decl_is_long_call_p (const_tree decl ATTRIBUTE_UNUSED)
+{
+  return false;
+}
+
+/* Return true if calls to symbol-ref SYM should be treated as
+   long-calls (i.e. called via a register).  */
+bool
+aarch64_is_long_call_p (rtx sym)
+{
+  return aarch64_decl_is_long_call_p (SYMBOL_REF_DECL (sym));
+}
+
+/* Return true if the offsets to a zero/sign-extract operation
+   represent an expression that matches an extend operation.  The
+   operands represent the parameters from
+
+   (extract (mult (reg) (mult_imm)) (extract_imm) (const_int 0)).  */
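+/* For example, in DImode a MULT_IMM of 4 and an EXTRACT_IMM of 34
+   (32 + 2) satisfy the checks below: the low 34 bits of (reg * 4) are
+   the low 32 bits of reg shifted left by 2, i.e. roughly the
+   extended-register "uxtw #2" / "sxtw #2" operand forms.  */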
+bool
+aarch64_is_extend_from_extract (enum machine_mode mode, rtx mult_imm,
+                               rtx extract_imm)
+{
+  HOST_WIDE_INT mult_val, extract_val;
+
+  if (! CONST_INT_P (mult_imm) || ! CONST_INT_P (extract_imm))
+    return false;
+
+  mult_val = INTVAL (mult_imm);
+  extract_val = INTVAL (extract_imm);
+
+  if (extract_val > 8
+      && extract_val < GET_MODE_BITSIZE (mode)
+      && exact_log2 (extract_val & ~7) > 0
+      && (extract_val & 7) <= 4
+      && mult_val == (1 << (extract_val & 7)))
+    return true;
+
+  return false;
+}
+
+/* Emit an insn that's a simple single-set.  Both the operands must be
+   known to be valid.  */
+inline static rtx
+emit_set_insn (rtx x, rtx y)
+{
+  return emit_insn (gen_rtx_SET (VOIDmode, x, y));
+}
+
+/* X and Y are two things to compare using CODE.  Emit the compare insn and
+   return the CC register in the appropriate mode.  */
+rtx
+aarch64_gen_compare_reg (RTX_CODE code, rtx x, rtx y)
+{
+  enum machine_mode mode = SELECT_CC_MODE (code, x, y);
+  rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
+
+  emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
+  return cc_reg;
+}
+
+/* Build the SYMBOL_REF for __tls_get_addr.  */
+
+static GTY(()) rtx tls_get_addr_libfunc;
+
+rtx
+aarch64_tls_get_addr (void)
+{
+  if (!tls_get_addr_libfunc)
+    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
+  return tls_get_addr_libfunc;
+}
+
+/* Return the TLS model to use for ADDR.  */
+
+static enum tls_model
+tls_symbolic_operand_type (rtx addr)
+{
+  enum tls_model tls_kind = TLS_MODEL_NONE;
+  rtx sym, addend;
+
+  if (GET_CODE (addr) == CONST)
+    {
+      split_const (addr, &sym, &addend);
+      if (GET_CODE (sym) == SYMBOL_REF)
+       tls_kind = SYMBOL_REF_TLS_MODEL (sym);
+    }
+  else if (GET_CODE (addr) == SYMBOL_REF)
+    tls_kind = SYMBOL_REF_TLS_MODEL (addr);
+
+  return tls_kind;
+}
+
+/* We allow LO_SUMs in our legitimate addresses so that combine can
+   merge addresses where necessary, but for code generation purposes
+   we generate the address as follows:
+   RTL                               Absolute
+   tmp = hi (symbol_ref);            adrp  x1, foo
+   dest = lo_sum (tmp, symbol_ref);  add   dest, x1, :lo12:foo
+                                     nop
+
+   PIC                               TLS
+   adrp x1, :got:foo                 adrp tmp, :tlsgd:foo
+   ldr  x1, [x1, #:got_lo12:foo]     add  dest, tmp, :tlsgd_lo12:foo
+                                     bl   __tls_get_addr
+                                     nop
+
+   Load TLS symbol, depending on TLS mechanism and TLS access model.
+
+   Global Dynamic - Traditional TLS:
+   adrp tmp, :tlsgd:imm
+   add  dest, tmp, #:tlsgd_lo12:imm
+   bl   __tls_get_addr
+
+   Global Dynamic - TLS Descriptors:
+   adrp dest, :tlsdesc:imm
+   ldr  tmp, [dest, #:tlsdesc_lo12:imm]
+   add  dest, dest, #:tlsdesc_lo12:imm
+   blr  tmp
+   mrs  tp, tpidr_el0
+   add  dest, dest, tp
+
+   Initial Exec:
+   mrs  tp, tpidr_el0
+   adrp tmp, :gottprel:imm
+   ldr  dest, [tmp, #:gottprel_lo12:imm]
+   add  dest, dest, tp
+
+   Local Exec:
+   mrs  tp, tpidr_el0
+   add  t0, tp, #:tprel_hi12:imm
+   add  t0, #:tprel_lo12_nc:imm
+*/
+
+static void
+aarch64_load_symref_appropriately (rtx dest, rtx imm,
+                                  enum aarch64_symbol_type type)
+{
+  switch (type)
+    {
+    case SYMBOL_SMALL_ABSOLUTE:
+      {
+       rtx tmp_reg = dest;
+       if (can_create_pseudo_p ())
+         {
+           tmp_reg =  gen_reg_rtx (Pmode);
+         }
+
+       emit_move_insn (tmp_reg, gen_rtx_HIGH (Pmode, imm));
+       emit_insn (gen_add_losym (dest, tmp_reg, imm));
+       return;
+      }
+
+    case SYMBOL_SMALL_GOT:
+      {
+       rtx tmp_reg = dest;
+       if (can_create_pseudo_p ())
+         {
+           tmp_reg =  gen_reg_rtx (Pmode);
+         }
+       emit_move_insn (tmp_reg, gen_rtx_HIGH (Pmode, imm));
+       emit_insn (gen_ldr_got_small (dest, tmp_reg, imm));
+       return;
+      }
+
+    case SYMBOL_SMALL_TLSGD:
+      {
+       rtx insns;
+       rtx result = gen_rtx_REG (Pmode, R0_REGNUM);
+
+       start_sequence ();
+       emit_call_insn (gen_tlsgd_small (result, imm));
+       insns = get_insns ();
+       end_sequence ();
+
+       RTL_CONST_CALL_P (insns) = 1;
+       emit_libcall_block (insns, dest, result, imm);
+       return;
+      }
+
+    case SYMBOL_SMALL_TLSDESC:
+      {
+       rtx x0 = gen_rtx_REG (Pmode, R0_REGNUM);
+       rtx tp;
+
+       emit_insn (gen_tlsdesc_small (imm));
+       tp = aarch64_load_tp (NULL);
+       emit_insn (gen_rtx_SET (Pmode, dest, gen_rtx_PLUS (Pmode, tp, x0)));
+       set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
+       return;
+      }
+
+    case SYMBOL_SMALL_GOTTPREL:
+      {
+       rtx tmp_reg = gen_reg_rtx (Pmode);
+       rtx tp = aarch64_load_tp (NULL);
+       emit_insn (gen_tlsie_small (tmp_reg, imm));
+       emit_insn (gen_rtx_SET (Pmode, dest, gen_rtx_PLUS (Pmode, tp, tmp_reg)));
+       set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
+       return;
+      }
+
+    case SYMBOL_SMALL_TPREL:
+      {
+       rtx tp = aarch64_load_tp (NULL);
+       emit_insn (gen_tlsle_small (dest, tp, imm));
+       set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
+       return;
+      }
+
+    default:
+      gcc_unreachable ();
+    }
+}
+
+/* Emit a move from SRC to DEST.  Assume that the move expanders can
+   handle all moves if !can_create_pseudo_p ().  The distinction is
+   important because, unlike emit_move_insn, the move expanders know
+   how to force Pmode objects into the constant pool even when the
+   constant pool address is not itself legitimate.  */
+static rtx
+aarch64_emit_move (rtx dest, rtx src)
+{
+  return (can_create_pseudo_p ()
+         ? emit_move_insn (dest, src)
+         : emit_move_insn_1 (dest, src));
+}
+
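+/* Split a 128-bit (TImode) move from SRC to DST into two 64-bit word
+   moves, using the dedicated TImode low/high patterns when exactly one
+   side is an FP/SIMD register and ordering the halves so that a source
+   register overlapping DST is not clobbered.  */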
+void
+aarch64_split_128bit_move (rtx dst, rtx src)
+{
+  rtx low_dst;
+
+  gcc_assert (GET_MODE (dst) == TImode);
+
+  if (REG_P (dst) && REG_P (src))
+    {
+      int src_regno = REGNO (src);
+      int dst_regno = REGNO (dst);
+
+      gcc_assert (GET_MODE (src) == TImode);
+
+      /* Handle r -> w, w -> r.  */
+      if (FP_REGNUM_P (dst_regno) && GP_REGNUM_P (src_regno))
+       {
+         emit_insn (gen_aarch64_movtilow_di (dst,
+                                             gen_lowpart (word_mode, src)));
+         emit_insn (gen_aarch64_movtihigh_di (dst,
+                                              gen_highpart (word_mode, src)));
+         return;
+       }
+      else if (GP_REGNUM_P (dst_regno) && FP_REGNUM_P (src_regno))
+       {
+         emit_insn (gen_aarch64_movdi_tilow (gen_lowpart (word_mode, dst),
+                                             src));
+         emit_insn (gen_aarch64_movdi_tihigh (gen_highpart (word_mode, dst),
+                                              src));
+         return;
+       }
+      /* Fall through to r -> r cases.  */
+    }
+
+  low_dst = gen_lowpart (word_mode, dst);
+  if (REG_P (low_dst)
+      && reg_overlap_mentioned_p (low_dst, src))
+    {
+      aarch64_emit_move (gen_highpart (word_mode, dst),
+                        gen_highpart_mode (word_mode, TImode, src));
+      aarch64_emit_move (low_dst, gen_lowpart (word_mode, src));
+    }
+  else
+    {
+      aarch64_emit_move (low_dst, gen_lowpart (word_mode, src));
+      aarch64_emit_move (gen_highpart (word_mode, dst),
+                        gen_highpart_mode (word_mode, TImode, src));
+    }
+}
+
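+/* Return true if a TImode move from SRC to DST needs to be split into
+   two word moves, i.e. unless both source and destination are FP/SIMD
+   registers.  */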
+bool
+aarch64_split_128bit_move_p (rtx dst, rtx src)
+{
+  return (! REG_P (src)
+         || ! (FP_REGNUM_P (REGNO (dst)) && FP_REGNUM_P (REGNO (src))));
+}
+
+static rtx
+aarch64_force_temporary (rtx x, rtx value)
+{
+  if (can_create_pseudo_p ())
+    return force_reg (Pmode, value);
+  else
+    {
+      x = aarch64_emit_move (x, value);
+      return x;
+    }
+}
+
+
+static rtx
+aarch64_add_offset (enum machine_mode mode, rtx temp, rtx reg, HOST_WIDE_INT offset)
+{
+  if (!aarch64_plus_immediate (GEN_INT (offset), DImode))
+    {
+      rtx high;
+      /* Load the full offset into a register.  This
+         might be improvable in the future.  */
+      high = GEN_INT (offset);
+      offset = 0;
+      high = aarch64_force_temporary (temp, high);
+      reg = aarch64_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
+    }
+  return plus_constant (mode, reg, offset);
+}
+
+void
+aarch64_expand_mov_immediate (rtx dest, rtx imm)
+{
+  enum machine_mode mode = GET_MODE (dest);
+  unsigned HOST_WIDE_INT mask;
+  int i;
+  bool first;
+  unsigned HOST_WIDE_INT val;
+  bool subtargets;
+  rtx subtarget;
+  int one_match, zero_match;
+
+  gcc_assert (mode == SImode || mode == DImode);
+
+  /* Check on what type of symbol it is.  */
+  if (GET_CODE (imm) == SYMBOL_REF
+      || GET_CODE (imm) == LABEL_REF
+      || GET_CODE (imm) == CONST)
+    {
+      rtx mem, base, offset;
+      enum aarch64_symbol_type sty;
+
+      /* If we have (const (plus symbol offset)), separate out the offset
+        before we start classifying the symbol.  */
+      split_const (imm, &base, &offset);
+
+      sty = aarch64_classify_symbol (base, SYMBOL_CONTEXT_ADR);
+      switch (sty)
+       {
+       case SYMBOL_FORCE_TO_MEM:
+         if (offset != const0_rtx
+             && targetm.cannot_force_const_mem (mode, imm))
+           {
+             gcc_assert (can_create_pseudo_p ());
+             base = aarch64_force_temporary (dest, base);
+             base = aarch64_add_offset (mode, NULL, base, INTVAL (offset));
+             aarch64_emit_move (dest, base);
+             return;
+           }
+         mem = force_const_mem (mode, imm);
+         gcc_assert (mem);
+         emit_insn (gen_rtx_SET (VOIDmode, dest, mem));
+         return;
+
+        case SYMBOL_SMALL_TLSGD:
+        case SYMBOL_SMALL_TLSDESC:
+        case SYMBOL_SMALL_GOTTPREL:
+       case SYMBOL_SMALL_GOT:
+         if (offset != const0_rtx)
+           {
+             gcc_assert (can_create_pseudo_p ());
+             base = aarch64_force_temporary (dest, base);
+             base = aarch64_add_offset (mode, NULL, base, INTVAL (offset));
+             aarch64_emit_move (dest, base);
+             return;
+           }
+         /* FALLTHRU */
+
+        case SYMBOL_SMALL_TPREL:
+       case SYMBOL_SMALL_ABSOLUTE:
+         aarch64_load_symref_appropriately (dest, imm, sty);
+         return;
+
+       default:
+         gcc_unreachable ();
+       }
+    }
+
+  if (CONST_INT_P (imm) && aarch64_move_imm (INTVAL (imm), mode))
+    {
+      emit_insn (gen_rtx_SET (VOIDmode, dest, imm));
+      return;
+    }
+
+  if (!CONST_INT_P (imm))
+    {
+      if (GET_CODE (imm) == HIGH)
+       emit_insn (gen_rtx_SET (VOIDmode, dest, imm));
+      else
+        {
+         rtx mem = force_const_mem (mode, imm);
+         gcc_assert (mem);
+         emit_insn (gen_rtx_SET (VOIDmode, dest, mem));
+       }
+
+      return;
+    }
+
+  if (mode == SImode)
+    {
+      /* We know we can't do this in 1 insn, and we must be able to do it
+        in two; so don't mess around looking for sequences that don't buy
+        us anything.  */
+      emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (INTVAL (imm) & 0xffff)));
+      emit_insn (gen_insv_immsi (dest, GEN_INT (16),
+                                GEN_INT ((INTVAL (imm) >> 16) & 0xffff)));
+      return;
+    }
+
+  /* Remaining cases are all for DImode.  */
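+  /* The code below works on the value in 16-bit chunks.  It first
+     counts how many chunks are all zeros or all ones.  If two chunks
+     are all ones it materializes the value with one move and one
+     insert; otherwise it tries a single-chunk move combined with a
+     12-bit (optionally shifted) add or subtract, then a bitmask
+     immediate combined arithmetically or logically with one more
+     instruction, and finally falls back to one move plus one insert
+     per non-zero chunk.  */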
+
+  val = INTVAL (imm);
+  subtargets = optimize && can_create_pseudo_p ();
+
+  one_match = 0;
+  zero_match = 0;
+  mask = 0xffff;
+
+  for (i = 0; i < 64; i += 16, mask <<= 16)
+    {
+      if ((val & mask) == 0)
+       zero_match++;
+      else if ((val & mask) == mask)
+       one_match++;
+    }
+
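+  /* Two chunks are all ones: force the first chunk that is not all
+     ones to ones as well in an initial move, then insert that chunk's
+     real value with a second instruction.  */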
+  if (one_match == 2)
+    {
+      mask = 0xffff;
+      for (i = 0; i < 64; i += 16, mask <<= 16)
+       {
+         if ((val & mask) != mask)
+           {
+             emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (val | mask)));
+             emit_insn (gen_insv_immdi (dest, GEN_INT (i),
+                                        GEN_INT ((val >> i) & 0xffff)));
+             return;
+           }
+       }
+      gcc_unreachable ();
+    }
+
+  if (zero_match == 2)
+    goto simple_sequence;
+
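+  /* Try a single 16-bit chunk, with the remaining bits either all
+     zeros or all ones, combined with one 12-bit (optionally shifted)
+     add or subtract.  */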
+  mask = 0x0ffff0000UL;
+  for (i = 16; i < 64; i += 16, mask <<= 16)
+    {
+      HOST_WIDE_INT comp = mask & ~(mask - 1);
+
+      if (aarch64_uimm12_shift (val - (val & mask)))
+       {
+         subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
+
+         emit_insn (gen_rtx_SET (VOIDmode, subtarget, GEN_INT (val & mask)));
+         emit_insn (gen_adddi3 (dest, subtarget,
+                                GEN_INT (val - (val & mask))));
+         return;
+       }
+      else if (aarch64_uimm12_shift (-(val - ((val + comp) & mask))))
+       {
+         subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
+
+         emit_insn (gen_rtx_SET (VOIDmode, subtarget,
+                                 GEN_INT ((val + comp) & mask)));
+         emit_insn (gen_adddi3 (dest, subtarget,
+                                GEN_INT (val - ((val + comp) & mask))));
+         return;
+       }
+      else if (aarch64_uimm12_shift (val - ((val - comp) | ~mask)))
+       {
+         subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
+
+         emit_insn (gen_rtx_SET (VOIDmode, subtarget,
+                                 GEN_INT ((val - comp) | ~mask)));
+         emit_insn (gen_adddi3 (dest, subtarget,
+                                GEN_INT (val - ((val - comp) | ~mask))));
+         return;
+       }
+      else if (aarch64_uimm12_shift (-(val - (val | ~mask))))
+       {
+         subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
+
+         emit_insn (gen_rtx_SET (VOIDmode, subtarget,
+                                 GEN_INT (val | ~mask)));
+         emit_insn (gen_adddi3 (dest, subtarget,
+                                GEN_INT (val - (val | ~mask))));
+         return;
+       }
+    }
+
+  /* See if we can do it by arithmetically combining two
+     immediates.  */
+  for (i = 0; i < AARCH64_NUM_BITMASKS; i++)
+    {
+      int j;
+      mask = 0xffff;
+
+      if (aarch64_uimm12_shift (val - aarch64_bitmasks[i])
+         || aarch64_uimm12_shift (-val + aarch64_bitmasks[i]))
+       {
+         subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
+         emit_insn (gen_rtx_SET (VOIDmode, subtarget,
+                                 GEN_INT (aarch64_bitmasks[i])));
+         emit_insn (gen_adddi3 (dest, subtarget,
+                                GEN_INT (val - aarch64_bitmasks[i])));
+         return;
+       }
+
+      for (j = 0; j < 64; j += 16, mask <<= 16)
+       {
+         if ((aarch64_bitmasks[i] & ~mask) == (val & ~mask))
+           {
+             emit_insn (gen_rtx_SET (VOIDmode, dest,
+                                     GEN_INT (aarch64_bitmasks[i])));
+             emit_insn (gen_insv_immdi (dest, GEN_INT (j),
+                                        GEN_INT ((val >> j) & 0xffff)));
+             return;
+           }
+       }
+    }
+
+  /* See if we can do it by logically combining two immediates.  */
+  for (i = 0; i < AARCH64_NUM_BITMASKS; i++)
+    {
+      if ((aarch64_bitmasks[i] & val) == aarch64_bitmasks[i])
+       {
+         int j;
+
+         for (j = i + 1; j < AARCH64_NUM_BITMASKS; j++)
+           if (val == (aarch64_bitmasks[i] | aarch64_bitmasks[j]))
+             {
+               subtarget = subtargets ? gen_reg_rtx (mode) : dest;
+               emit_insn (gen_rtx_SET (VOIDmode, subtarget,
+                                       GEN_INT (aarch64_bitmasks[i])));
+               emit_insn (gen_iordi3 (dest, subtarget,
+                                      GEN_INT (aarch64_bitmasks[j])));
+               return;
+             }
+       }
+      else if ((val & aarch64_bitmasks[i]) == val)
+       {
+         int j;
+
+         for (j = i + 1; j < AARCH64_NUM_BITMASKS; j++)
+           if (val == (aarch64_bitmasks[j] & aarch64_bitmasks[i]))
+             {
+
+               subtarget = subtargets ? gen_reg_rtx (mode) : dest;
+               emit_insn (gen_rtx_SET (VOIDmode, subtarget,
+                                       GEN_INT (aarch64_bitmasks[j])));
+               emit_insn (gen_anddi3 (dest, subtarget,
+                                      GEN_INT (aarch64_bitmasks[i])));
+               return;
+             }
+       }
+    }
+
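+  /* Fall back to a move of the first non-zero 16-bit chunk followed by
+     one insert per remaining non-zero chunk.  */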
+ simple_sequence:
+  first = true;
+  mask = 0xffff;
+  for (i = 0; i < 64; i += 16, mask <<= 16)
+    {
+      if ((val & mask) != 0)
+       {
+         if (first)
+           {
+             emit_insn (gen_rtx_SET (VOIDmode, dest,
+                                     GEN_INT (val & mask)));
+             first = false;
+           }
+         else
+           emit_insn (gen_insv_immdi (dest, GEN_INT (i),
+                                      GEN_INT ((val >> i) & 0xffff)));
+       }
+    }
+}
+
+static bool
+aarch64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
+{
+  /* Indirect calls are not currently supported.  */
+  if (decl == NULL)
+    return false;
+
+  /* Cannot tail-call to long-calls, since these are outside of the
+     range of a branch instruction (we could handle this if we added
+     support for indirect tail-calls).  */
+  if (aarch64_decl_is_long_call_p (decl))
+    return false;
+
+  return true;
+}
+
+/* Implement TARGET_PASS_BY_REFERENCE.  */
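+/* For example, a 24-byte plain structure needs more than two registers
+   and is passed by reference, whereas a homogeneous aggregate of four
+   doubles is a SIMD/FP candidate and is not.  */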
+
+static bool
+aarch64_pass_by_reference (cumulative_args_t pcum ATTRIBUTE_UNUSED,
+                          enum machine_mode mode,
+                          const_tree type,
+                          bool named ATTRIBUTE_UNUSED)
+{
+  HOST_WIDE_INT size;
+  enum machine_mode dummymode;
+  int nregs;
+
+  /* GET_MODE_SIZE (BLKmode) is useless since it is 0.  */
+  size = (mode == BLKmode && type)
+    ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
+
+  if (type)
+    {
+      /* Arrays always passed by reference.  */
+      if (TREE_CODE (type) == ARRAY_TYPE)
+       return true;
+      /* Other aggregates based on their size.  */
+      if (AGGREGATE_TYPE_P (type))
+       size = int_size_in_bytes (type);
+    }
+
+  /* Variable-sized arguments are always passed by reference.  */
+  if (size < 0)
+    return true;
+
+  /* Can this be a candidate to be passed in fp/simd register(s)?  */
+  if (aarch64_vfp_is_call_or_return_candidate (mode, type,
+                                              &dummymode, &nregs,
+                                              NULL))
+    return false;
+
+  /* Arguments which are variable sized or larger than 2 registers are
+     passed by reference unless they are a homogeneous floating-point
+     aggregate.  */
+  return size > 2 * UNITS_PER_WORD;
+}
+
+/* Return TRUE if VALTYPE is padded to its least significant bits.  */
+static bool
+aarch64_return_in_msb (const_tree valtype)
+{
+  enum machine_mode dummy_mode;
+  int dummy_int;
+
+  /* Never happens in little-endian mode.  */
+  if (!BYTES_BIG_ENDIAN)
+    return false;
+
+  /* Only composite types smaller than or equal to 16 bytes can
+     be potentially returned in registers.  */
+  if (!aarch64_composite_type_p (valtype, TYPE_MODE (valtype))
+      || int_size_in_bytes (valtype) <= 0
+      || int_size_in_bytes (valtype) > 16)
+    return false;
+
+  /* But not a composite that is an HFA (Homogeneous Floating-point Aggregate)
+     or an HVA (Homogeneous Short-Vector Aggregate); such a special composite
+     is always passed/returned in the least significant bits of fp/simd
+     register(s).  */
+  if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (valtype), valtype,
+                                              &dummy_mode, &dummy_int, NULL))
+    return false;
+
+  return true;
+}
+
+/* Implement TARGET_FUNCTION_VALUE.
+   Define how to find the value returned by a function.  */
+
+static rtx
+aarch64_function_value (const_tree type, const_tree func,
+                       bool outgoing ATTRIBUTE_UNUSED)
+{
+  enum machine_mode mode;
+  int unsignedp;
+  int count;
+  enum machine_mode ag_mode;
+
+  mode = TYPE_MODE (type);
+  if (INTEGRAL_TYPE_P (type))
+    mode = promote_function_mode (type, mode, &unsignedp, func, 1);
+
+  if (aarch64_return_in_msb (type))
+    {
+      HOST_WIDE_INT size = int_size_in_bytes (type);
+
+      if (size % UNITS_PER_WORD != 0)
+       {
+         size += UNITS_PER_WORD - size % UNITS_PER_WORD;
+         mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
+       }
+    }
+
+  if (aarch64_vfp_is_call_or_return_candidate (mode, type,
+                                              &ag_mode, &count, NULL))
+    {
+      if (!aarch64_composite_type_p (type, mode))
+       {
+         gcc_assert (count == 1 && mode == ag_mode);
+         return gen_rtx_REG (mode, V0_REGNUM);
+       }
+      else
+       {
+         int i;
+         rtx par;
+
+         par = gen_rtx_PARALLEL (mode, rtvec_alloc (count));
+         for (i = 0; i < count; i++)
+           {
+             rtx tmp = gen_rtx_REG (ag_mode, V0_REGNUM + i);
+             tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp,
+                                      GEN_INT (i * GET_MODE_SIZE (ag_mode)));
+             XVECEXP (par, 0, i) = tmp;
+           }
+         return par;
+       }
+    }
+  else
+    return gen_rtx_REG (mode, R0_REGNUM);
+}
+
+/* Implements TARGET_FUNCTION_VALUE_REGNO_P.
+   Return true if REGNO is the number of a hard register in which the values
+   of a called function may come back.  */
+
+static bool
+aarch64_function_value_regno_p (const unsigned int regno)
+{
+  /* Maximum of 16 bytes can be returned in the general registers.  Examples
+     of 16-byte return values are: 128-bit integers and 16-byte small
+     structures (excluding homogeneous floating-point aggregates).  */
+  if (regno == R0_REGNUM || regno == R1_REGNUM)
+    return true;
+
+  /* Up to four fp/simd registers can return a function value, e.g. a
+     homogeneous floating-point aggregate having four members.  */
+  if (regno >= V0_REGNUM && regno < V0_REGNUM + HA_MAX_NUM_FLDS)
+    return !TARGET_GENERAL_REGS_ONLY;
+
+  return false;
+}
+
+/* Implement TARGET_RETURN_IN_MEMORY.
+
+   If the type T of the result of a function is such that
+     void func (T arg)
+   would require that arg be passed as a value in a register (or set of
+   registers) according to the parameter passing rules, then the result
+   is returned in the same registers as would be used for such an
+   argument.  */
+
+static bool
+aarch64_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
+{
+  HOST_WIDE_INT size;
+  enum machine_mode ag_mode;
+  int count;
+
+  if (!AGGREGATE_TYPE_P (type)
+      && TREE_CODE (type) != COMPLEX_TYPE
+      && TREE_CODE (type) != VECTOR_TYPE)
+    /* Simple scalar types always returned in registers.  */
+    return false;
+
+  if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (type),
+                                              type,
+                                              &ag_mode,
+                                              &count,
+                                              NULL))
+    return false;
+
+  /* Types larger than 2 registers returned in memory.  */
+  size = int_size_in_bytes (type);
+  return (size < 0 || size > 2 * UNITS_PER_WORD);
+}
+
+static bool
+aarch64_vfp_is_call_candidate (cumulative_args_t pcum_v, enum machine_mode mode,
+                              const_tree type, int *nregs)
+{
+  CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
+  return aarch64_vfp_is_call_or_return_candidate (mode,
+                                                 type,
+                                                 &pcum->aapcs_vfp_rmode,
+                                                 nregs,
+                                                 NULL);
+}
+
+/* Given MODE and TYPE of a function argument, return the alignment in
+   bits.  The idea is to suppress any stronger alignment requested by
+   the user and opt for the natural alignment (specified in AAPCS64 \S 4.1).
+   This is a helper function for local use only.  */
+
+static unsigned int
+aarch64_function_arg_alignment (enum machine_mode mode, const_tree type)
+{
+  unsigned int alignment;
+
+  if (type)
+    {
+      if (!integer_zerop (TYPE_SIZE (type)))
+       {
+         if (TYPE_MODE (type) == mode)
+           alignment = TYPE_ALIGN (type);
+         else
+           alignment = GET_MODE_ALIGNMENT (mode);
+       }
+      else
+       alignment = 0;
+    }
+  else
+    alignment = GET_MODE_ALIGNMENT (mode);
+
+  return alignment;
+}
+
+/* Layout a function argument according to the AAPCS64 rules.  The rule
+   numbers refer to the rule numbers in the AAPCS64.  */
+
+static void
+aarch64_layout_arg (cumulative_args_t pcum_v, enum machine_mode mode,
+                   const_tree type,
+                   bool named ATTRIBUTE_UNUSED)
+{
+  CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
+  int ncrn, nvrn, nregs;
+  bool allocate_ncrn, allocate_nvrn;
+
+  /* We need to do this once per argument.  */
+  if (pcum->aapcs_arg_processed)
+    return;
+
+  pcum->aapcs_arg_processed = true;
+
+  allocate_ncrn = (type) ? !(FLOAT_TYPE_P (type)) : !FLOAT_MODE_P (mode);
+  allocate_nvrn = aarch64_vfp_is_call_candidate (pcum_v,
+                                                mode,
+                                                type,
+                                                &nregs);
+
+  /* allocate_ncrn may be a false positive, but allocate_nvrn is quite reliable.
+     The following code thus handles passing by SIMD/FP registers first.  */
+
+  nvrn = pcum->aapcs_nvrn;
+
+  /* C1 - C5 for floating point, homogeneous floating point aggregates (HFA)
+     and homogeneous short-vector aggregates (HVA).  */
+  if (allocate_nvrn)
+    {
+      if (nvrn + nregs <= NUM_FP_ARG_REGS)
+       {
+         pcum->aapcs_nextnvrn = nvrn + nregs;
+         if (!aarch64_composite_type_p (type, mode))
+           {
+             gcc_assert (nregs == 1);
+             pcum->aapcs_reg = gen_rtx_REG (mode, V0_REGNUM + nvrn);
+           }
+         else
+           {
+             rtx par;
+             int i;
+             par = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs));
+             for (i = 0; i < nregs; i++)
+               {
+                 rtx tmp = gen_rtx_REG (pcum->aapcs_vfp_rmode,
+                                        V0_REGNUM + nvrn + i);
+                 tmp = gen_rtx_EXPR_LIST
+                   (VOIDmode, tmp,
+                    GEN_INT (i * GET_MODE_SIZE (pcum->aapcs_vfp_rmode)));
+                 XVECEXP (par, 0, i) = tmp;
+               }
+             pcum->aapcs_reg = par;
+           }
+         return;
+       }
+      else
+       {
+         /* C.3 NSRN is set to 8.  */
+         pcum->aapcs_nextnvrn = NUM_FP_ARG_REGS;
+         goto on_stack;
+       }
+    }
+
+  ncrn = pcum->aapcs_ncrn;
+  nregs = ((type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode))
+          + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+
+
+  /* C6 - C9, though the sign and zero extension semantics are
+     handled elsewhere.  This is the case where the argument fits
+     entirely in general registers.  */
+  if (allocate_ncrn && (ncrn + nregs <= NUM_ARG_REGS))
+    {
+      unsigned int alignment = aarch64_function_arg_alignment (mode, type);
+
+      gcc_assert (nregs == 0 || nregs == 1 || nregs == 2);
+
+      /* C.8 if the argument has an alignment of 16 then the NGRN is
+         rounded up to the next even number.  */
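+      /* For example, a TImode (__int128) argument that arrives when an
+        odd number of core argument registers is in use is allocated to
+        the next even-numbered pair, e.g. x2/x3 rather than x1/x2.  */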
+      if (nregs == 2 && alignment == 16 * BITS_PER_UNIT && ncrn % 2)
+       {
+         ++ncrn;
+         gcc_assert (ncrn + nregs <= NUM_ARG_REGS);
+       }
+      /* NREGS can be 0 when e.g. an empty structure is to be passed.
+         A reg is still generated for it, but the caller should be smart
+        enough not to use it.  */
+      if (nregs == 0 || nregs == 1 || GET_MODE_CLASS (mode) == MODE_INT)
+       {
+         pcum->aapcs_reg = gen_rtx_REG (mode, R0_REGNUM + ncrn);
+       }
+      else
+       {
+         rtx par;
+         int i;
+
+         par = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs));
+         for (i = 0; i < nregs; i++)
+           {
+             rtx tmp = gen_rtx_REG (word_mode, R0_REGNUM + ncrn + i);
+             tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp,
+                                      GEN_INT (i * UNITS_PER_WORD));
+             XVECEXP (par, 0, i) = tmp;
+           }
+         pcum->aapcs_reg = par;
+       }
+
+      pcum->aapcs_nextncrn = ncrn + nregs;
+      return;
+    }
+
+  /* C.11  */
+  pcum->aapcs_nextncrn = NUM_ARG_REGS;
+
+  /* The argument is passed on stack; record the needed number of words for
+     this argument (we can re-use NREGS) and align the total size if
+     necessary.  */
+on_stack:
+  pcum->aapcs_stack_words = nregs;
+  if (aarch64_function_arg_alignment (mode, type) == 16 * BITS_PER_UNIT)
+    pcum->aapcs_stack_size = AARCH64_ROUND_UP (pcum->aapcs_stack_size,
+                                              16 / UNITS_PER_WORD) + 1;
+  return;
+}
+
+/* Implement TARGET_FUNCTION_ARG.  */
+
+static rtx
+aarch64_function_arg (cumulative_args_t pcum_v, enum machine_mode mode,
+                     const_tree type, bool named)
+{
+  CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
+  gcc_assert (pcum->pcs_variant == ARM_PCS_AAPCS64);
+
+  if (mode == VOIDmode)
+    return NULL_RTX;
+
+  aarch64_layout_arg (pcum_v, mode, type, named);
+  return pcum->aapcs_reg;
+}
+
+void
+aarch64_init_cumulative_args (CUMULATIVE_ARGS *pcum,
+                          const_tree fntype ATTRIBUTE_UNUSED,
+                          rtx libname ATTRIBUTE_UNUSED,
+                          const_tree fndecl ATTRIBUTE_UNUSED,
+                          unsigned n_named ATTRIBUTE_UNUSED)
+{
+  pcum->aapcs_ncrn = 0;
+  pcum->aapcs_nvrn = 0;
+  pcum->aapcs_nextncrn = 0;
+  pcum->aapcs_nextnvrn = 0;
+  pcum->pcs_variant = ARM_PCS_AAPCS64;
+  pcum->aapcs_reg = NULL_RTX;
+  pcum->aapcs_arg_processed = false;
+  pcum->aapcs_stack_words = 0;
+  pcum->aapcs_stack_size = 0;
+
+  return;
+}
+
+static void
+aarch64_function_arg_advance (cumulative_args_t pcum_v,
+                             enum machine_mode mode,
+                             const_tree type,
+                             bool named)
+{
+  CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
+  if (pcum->pcs_variant == ARM_PCS_AAPCS64)
+    {
+      aarch64_layout_arg (pcum_v, mode, type, named);
+      gcc_assert ((pcum->aapcs_reg != NULL_RTX)
+                 != (pcum->aapcs_stack_words != 0));
+      pcum->aapcs_arg_processed = false;
+      pcum->aapcs_ncrn = pcum->aapcs_nextncrn;
+      pcum->aapcs_nvrn = pcum->aapcs_nextnvrn;
+      pcum->aapcs_stack_size += pcum->aapcs_stack_words;
+      pcum->aapcs_stack_words = 0;
+      pcum->aapcs_reg = NULL_RTX;
+    }
+}
+
+bool
+aarch64_function_arg_regno_p (unsigned regno)
+{
+  return ((GP_REGNUM_P (regno) && regno < R0_REGNUM + NUM_ARG_REGS)
+         || (FP_REGNUM_P (regno) && regno < V0_REGNUM + NUM_FP_ARG_REGS));
+}
+
+/* Implement FUNCTION_ARG_BOUNDARY.  Every parameter gets at least
+   PARM_BOUNDARY bits of alignment, but will be given anything up
+   to STACK_BOUNDARY bits if the type requires it.  This makes sure
+   that both before and after the layout of each argument, the Next
+   Stacked Argument Address (NSAA) will have a minimum alignment of
+   8 bytes.  */
+
+static unsigned int
+aarch64_function_arg_boundary (enum machine_mode mode, const_tree type)
+{
+  unsigned int alignment = aarch64_function_arg_alignment (mode, type);
+
+  if (alignment < PARM_BOUNDARY)
+    alignment = PARM_BOUNDARY;
+  if (alignment > STACK_BOUNDARY)
+    alignment = STACK_BOUNDARY;
+  return alignment;
+}
+
+/* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
+
+   Return true if an argument passed on the stack should be padded upwards,
+   i.e. if the least-significant byte of the stack slot has useful data.
+
+   Small aggregate types are placed in the lowest memory address.
+
+   The related parameter passing rules are B.4, C.3, C.5 and C.14.  */
+
+bool
+aarch64_pad_arg_upward (enum machine_mode mode, const_tree type)
+{
+  /* On little-endian targets, the least significant byte of every stack
+     argument is passed at the lowest byte address of the stack slot.  */
+  if (!BYTES_BIG_ENDIAN)
+    return true;
+
+  /* Otherwise, integral types and floating point types are padded downward:
+     the least significant byte of a stack argument is passed at the highest
+     byte address of the stack slot.  */
+  if (type
+      ? (INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type))
+      : (SCALAR_INT_MODE_P (mode) || SCALAR_FLOAT_MODE_P (mode)))
+    return false;
+
+  /* Everything else padded upward, i.e. data in first byte of stack slot.  */
+  return true;
+}
+
+/* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
+
+   It specifies padding for the last (and possibly the only)
+   element of a block move between registers and memory.  Viewing
+   the block as if it were in memory, padding upward means that
+   the last element is padded after its most significant byte,
+   while padding downward means that the last element is padded
+   at its least significant byte side.
+
+   Small aggregates and small complex types are always padded
+   upwards.
+
+   We don't need to worry about homogeneous floating-point or
+   short-vector aggregates; their move is not affected by the
+   padding direction determined here.  Regardless of endianness,
+   each element of such an aggregate is put in the least
+   significant bits of a fp/simd register.
+
+   Return !BYTES_BIG_ENDIAN if the least significant byte of the
+   register has useful data, and return the opposite if the most
+   significant byte does.  */
+
+bool
+aarch64_pad_reg_upward (enum machine_mode mode, const_tree type,
+                    bool first ATTRIBUTE_UNUSED)
+{
+
+  /* Small composite types are always padded upward.  */
+  if (BYTES_BIG_ENDIAN && aarch64_composite_type_p (type, mode))
+    {
+      HOST_WIDE_INT size = (type ? int_size_in_bytes (type)
+                           : GET_MODE_SIZE (mode));
+      if (size < 2 * UNITS_PER_WORD)
+       return true;
+    }
+
+  /* Otherwise, use the default padding.  */
+  return !BYTES_BIG_ENDIAN;
+}
+
+static enum machine_mode
+aarch64_libgcc_cmp_return_mode (void)
+{
+  return SImode;
+}
+
+static bool
+aarch64_frame_pointer_required (void)
+{
+  /* If the function contains dynamic stack allocations, we need to
+     use the frame pointer to access the static parts of the frame.  */
+  if (cfun->calls_alloca)
+    return true;
+
+  /* We may have turned flag_omit_frame_pointer on in order to have this
+     function called; if we did, we also set the 'faked_omit_frame_pointer' flag
+     and we'll check it here.
+     If we really did set flag_omit_frame_pointer normally, then we return false
+     (no frame pointer required) in all cases.  */
+
+  if (flag_omit_frame_pointer && !faked_omit_frame_pointer)
+    return false;
+  else if (flag_omit_leaf_frame_pointer)
+    return !crtl->is_leaf;
+  return true;
+}
+
+/* Mark the registers that need to be saved by the callee and calculate
+   the size of the callee-saved registers area and frame record (both FP
+   and LR may be omitted).  */
+static void
+aarch64_layout_frame (void)
+{
+  HOST_WIDE_INT offset = 0;
+  int regno;
+
+  if (reload_completed && cfun->machine->frame.laid_out)
+    return;
+
+  cfun->machine->frame.fp_lr_offset = 0;
+
+  /* First mark all the registers that really need to be saved...  */
+  for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++)
+    cfun->machine->frame.reg_offset[regno] = -1;
+
+  for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
+    cfun->machine->frame.reg_offset[regno] = -1;
+
+  /* ... that includes the eh data registers (if needed)...  */
+  if (crtl->calls_eh_return)
+    for (regno = 0; EH_RETURN_DATA_REGNO (regno) != INVALID_REGNUM; regno++)
+      cfun->machine->frame.reg_offset[EH_RETURN_DATA_REGNO (regno)] = 0;
+
+  /* ... and any callee saved register that dataflow says is live.  */
+  for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++)
+    if (df_regs_ever_live_p (regno)
+       && !call_used_regs[regno])
+      cfun->machine->frame.reg_offset[regno] = 0;
+
+  for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
+    if (df_regs_ever_live_p (regno)
+       && !call_used_regs[regno])
+      cfun->machine->frame.reg_offset[regno] = 0;
+
+  if (frame_pointer_needed)
+    {
+      cfun->machine->frame.reg_offset[R30_REGNUM] = 0;
+      cfun->machine->frame.reg_offset[R29_REGNUM] = 0;
+      cfun->machine->frame.hardfp_offset = 2 * UNITS_PER_WORD;
+    }
+
+  /* Now assign stack slots for them.  */
+  for (regno = R0_REGNUM; regno <= R28_REGNUM; regno++)
+    if (cfun->machine->frame.reg_offset[regno] != -1)
+      {
+       cfun->machine->frame.reg_offset[regno] = offset;
+       offset += UNITS_PER_WORD;
+      }
+
+  for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
+    if (cfun->machine->frame.reg_offset[regno] != -1)
+      {
+       cfun->machine->frame.reg_offset[regno] = offset;
+       offset += UNITS_PER_WORD;
+      }
+
+  if (frame_pointer_needed)
+    {
+      cfun->machine->frame.reg_offset[R29_REGNUM] = offset;
+      offset += UNITS_PER_WORD;
+      cfun->machine->frame.fp_lr_offset = UNITS_PER_WORD;
+    }
+
+  if (cfun->machine->frame.reg_offset[R30_REGNUM] != -1)
+    {
+      cfun->machine->frame.reg_offset[R30_REGNUM] = offset;
+      offset += UNITS_PER_WORD;
+      cfun->machine->frame.fp_lr_offset += UNITS_PER_WORD;
+    }
+
+  cfun->machine->frame.padding0 =
+    (AARCH64_ROUND_UP (offset, STACK_BOUNDARY / BITS_PER_UNIT) - offset);
+  offset = AARCH64_ROUND_UP (offset, STACK_BOUNDARY / BITS_PER_UNIT);
+
+  cfun->machine->frame.saved_regs_size = offset;
+  cfun->machine->frame.laid_out = true;
+}
+
+/* Make the last instruction frame-related and note that it performs
+   the operation described by FRAME_PATTERN.  */
+
+static void
+aarch64_set_frame_expr (rtx frame_pattern)
+{
+  rtx insn;
+
+  insn = get_last_insn ();
+  RTX_FRAME_RELATED_P (insn) = 1;
+  RTX_FRAME_RELATED_P (frame_pattern) = 1;
+  REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+                                     frame_pattern,
+                                     REG_NOTES (insn));
+}
+
+static bool
+aarch64_register_saved_on_entry (int regno)
+{
+  return cfun->machine->frame.reg_offset[regno] != -1;
+}
+
+
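+/* Save (if RESTORE is false) or restore (if RESTORE is true) the
+   callee-saved FP/SIMD registers, pairing adjacent saved registers
+   into load-pair/store-pair instructions where possible.  BASE_RTX
+   plus START_OFFSET addresses the first slot; INCREMENT is the slot
+   size.  */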
+static void
+aarch64_save_or_restore_fprs (int start_offset, int increment,
+                             bool restore, rtx base_rtx)
+
+{
+  unsigned regno;
+  unsigned regno2;
+  rtx insn;
+  rtx (*gen_mem_ref) (enum machine_mode, rtx)
+    = frame_pointer_needed ? gen_frame_mem : gen_rtx_MEM;
+
+
+  for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
+    {
+      if (aarch64_register_saved_on_entry (regno))
+       {
+         rtx mem;
+         mem = gen_mem_ref (DFmode,
+                            plus_constant (Pmode,
+                                           base_rtx,
+                                           start_offset));
+
+         for (regno2 = regno + 1;
+              regno2 <= V31_REGNUM
+                && !aarch64_register_saved_on_entry (regno2);
+              regno2++)
+           {
+             /* Empty loop.  */
+           }
+         if (regno2 <= V31_REGNUM &&
+             aarch64_register_saved_on_entry (regno2))
+           {
+             rtx mem2;
+             /* Next highest register to be saved.  */
+             mem2 = gen_mem_ref (DFmode,
+                                 plus_constant
+                                 (Pmode,
+                                  base_rtx,
+                                  start_offset + increment));
+             if (restore == false)
+               {
+                 insn = emit_insn
+                   ( gen_store_pairdf (mem, gen_rtx_REG (DFmode, regno),
+                                       mem2, gen_rtx_REG (DFmode, regno2)));
+
+               }
+             else
+               {
+                 insn = emit_insn
+                   ( gen_load_pairdf (gen_rtx_REG (DFmode, regno), mem,
+                                      gen_rtx_REG (DFmode, regno2), mem2));
+
+                 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DFmode, regno));
+                 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DFmode, regno2));
+               }
+
+             /* The first part of a frame-related parallel insn is
+                always assumed to be relevant to the frame
+                calculations; subsequent parts are only frame-related
+                if explicitly marked.  */
+             RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0,
+                                           1)) = 1;
+             regno = regno2;
+             start_offset += increment * 2;
+           }
+         else
+           {
+             if (restore == false)
+               insn = emit_move_insn (mem, gen_rtx_REG (DFmode, regno));
+             else
+               {
+                 insn = emit_move_insn (gen_rtx_REG (DFmode, regno), mem);
+                 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno));
+               }
+             start_offset += increment;
+           }
+         RTX_FRAME_RELATED_P (insn) = 1;
+       }
+    }
+
+}
+
+
+/* Save (if RESTORE is false) or restore (if RESTORE is true) the
+   callee-saved registers, starting at OFFSET from the stack pointer.  */
+static void
+aarch64_save_or_restore_callee_save_registers (HOST_WIDE_INT offset,
+                                           bool restore)
+{
+  rtx insn;
+  rtx base_rtx = stack_pointer_rtx;
+  HOST_WIDE_INT start_offset = offset;
+  HOST_WIDE_INT increment = UNITS_PER_WORD;
+  rtx (*gen_mem_ref) (enum machine_mode, rtx)
+    = frame_pointer_needed ? gen_frame_mem : gen_rtx_MEM;
+  unsigned limit = frame_pointer_needed ? R28_REGNUM : R30_REGNUM;
+  unsigned regno;
+  unsigned regno2;
+
+  for (regno = R0_REGNUM; regno <= limit; regno++)
+    {
+      if (aarch64_register_saved_on_entry (regno))
+       {
+         rtx mem;
+         mem = gen_mem_ref (Pmode,
+                            plus_constant (Pmode,
+                                           base_rtx,
+                                           start_offset));
+
+         for (regno2 = regno + 1;
+              regno2 <= limit
+                && !aarch64_register_saved_on_entry (regno2);
+              regno2++)
+           {
+             /* Empty loop.  */
+           }
+         if (regno2 <= limit &&
+             aarch64_register_saved_on_entry (regno2))
+           {
+             rtx mem2;
+             /* Next highest register to be saved.  */
+             mem2 = gen_mem_ref (Pmode,
+                                 plus_constant
+                                 (Pmode,
+                                  base_rtx,
+                                  start_offset + increment));
+             if (restore == false)
+               {
+                 insn = emit_insn
+                   ( gen_store_pairdi (mem, gen_rtx_REG (DImode, regno),
+                                       mem2, gen_rtx_REG (DImode, regno2)));
+
+               }
+             else
+               {
+                 insn = emit_insn
+                   ( gen_load_pairdi (gen_rtx_REG (DImode, regno), mem,
+                                    gen_rtx_REG (DImode, regno2), mem2));
+
+                 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno));
+                 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno2));
+               }
+
+             /* The first part of a frame-related parallel insn is
+                always assumed to be relevant to the frame
+                calculations; subsequent parts are only frame-related
+                if explicitly marked.  */
+             RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0,
+                                           1)) = 1;
+             regno = regno2;
+             start_offset += increment * 2;
+           }
+         else
+           {
+             if (restore == false)
+               insn = emit_move_insn (mem, gen_rtx_REG (DImode, regno));
+             else
+               {
+                 insn = emit_move_insn (gen_rtx_REG (DImode, regno), mem);
+                 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno));
+               }
+             start_offset += increment;
+           }
+         RTX_FRAME_RELATED_P (insn) = 1;
+       }
+    }
+
+  aarch64_save_or_restore_fprs (start_offset, increment, restore, base_rtx);
+
+}
+
+/* AArch64 stack frames generated by this compiler look like:
+
+       +-------------------------------+
+       |                               |
+       |  incoming stack arguments     |
+       |                               |
+       +-------------------------------+ <-- arg_pointer_rtx
+       |                               |
+       |  callee-allocated save area   |
+       |  for register varargs         |
+       |                               |
+       +-------------------------------+
+       |                               |
+       |  local variables              |
+       |                               |
+       +-------------------------------+ <-- frame_pointer_rtx
+       |                               |
+       |  callee-saved registers       |
+       |                               |
+       +-------------------------------+
+       |  LR'                          |
+       +-------------------------------+
+       |  FP'                          |
+      P +-------------------------------+ <-- hard_frame_pointer_rtx
+       |  dynamic allocation           |
+       +-------------------------------+
+       |                               |
+       |  outgoing stack arguments     |
+       |                               |
+       +-------------------------------+ <-- stack_pointer_rtx
+
+   Dynamic stack allocations such as alloca insert data at point P.
+   They decrease stack_pointer_rtx but leave frame_pointer_rtx and
+   hard_frame_pointer_rtx unchanged.  */
+
+/* Generate the prologue instructions for entry into a function.
+   Establish the stack frame by decreasing the stack pointer with a
+   properly calculated size and, if necessary, create a frame record
+   filled with the values of LR and previous frame pointer.  The
+   current FP is also set up if it is in use.  */
+
+void
+aarch64_expand_prologue (void)
+{
+  /* sub sp, sp, #<frame_size>
+     stp {fp, lr}, [sp, #<frame_size> - 16]
+     add fp, sp, #<frame_size> - hardfp_offset
+     stp {cs_reg}, [fp, #-16] etc.
+
+     sub sp, sp, <final_adjustment_if_any>
+  */
+  HOST_WIDE_INT original_frame_size;   /* local variables + vararg save */
+  HOST_WIDE_INT frame_size, offset;
+  HOST_WIDE_INT fp_offset;             /* FP offset from SP */
+  rtx insn;
+
+  aarch64_layout_frame ();
+  original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size;
+  gcc_assert ((!cfun->machine->saved_varargs_size || cfun->stdarg)
+             && (cfun->stdarg || !cfun->machine->saved_varargs_size));
+  frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size
+               + crtl->outgoing_args_size);
+  offset = frame_size = AARCH64_ROUND_UP (frame_size,
+                                         STACK_BOUNDARY / BITS_PER_UNIT);
+
+  if (flag_stack_usage_info)
+    current_function_static_stack_size = frame_size;
+
+  fp_offset = (offset
+              - original_frame_size
+              - cfun->machine->frame.saved_regs_size);
+
+  /* Store pairs and load pairs have a range only of +/- 512.  */
+  if (offset >= 512)
+    {
+      /* When the frame has a large size, an initial decrease is done on
+        the stack pointer to jump over the callee-allocated save area for
+        register varargs, the local variable area and/or the callee-saved
+        register area.  This will allow the pre-index write-back
+        store pair instructions to be used for setting up the stack frame
+        efficiently.  */
+      offset = original_frame_size + cfun->machine->frame.saved_regs_size;
+      if (offset >= 512)
+       offset = cfun->machine->frame.saved_regs_size;
+
+      frame_size -= (offset + crtl->outgoing_args_size);
+      fp_offset = 0;
+
+      if (frame_size >= 0x1000000)
+       {
+         rtx op0 = gen_rtx_REG (Pmode, IP0_REGNUM);
+         emit_move_insn (op0, GEN_INT (-frame_size));
+         emit_insn (gen_add2_insn (stack_pointer_rtx, op0));
+         aarch64_set_frame_expr (gen_rtx_SET
+                                 (Pmode, stack_pointer_rtx,
+                                  gen_rtx_PLUS (Pmode,
+                                                stack_pointer_rtx,
+                                                GEN_INT (-frame_size))));
+       }
+      else if (frame_size > 0)
+       {
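+         /* Split the adjustment into a 12-bit-shifted high part and a
+            12-bit low part, so that each piece is a legal immediate
+            operand.  */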
+         if ((frame_size & 0xfff) != frame_size)
+           {
+             insn = emit_insn (gen_add2_insn
+                               (stack_pointer_rtx,
+                                GEN_INT (-(frame_size
+                                           & ~(HOST_WIDE_INT)0xfff))));
+             RTX_FRAME_RELATED_P (insn) = 1;
+           }
+         if ((frame_size & 0xfff) != 0)
+           {
+             insn = emit_insn (gen_add2_insn
+                               (stack_pointer_rtx,
+                                GEN_INT (-(frame_size
+                                           & (HOST_WIDE_INT)0xfff))));
+             RTX_FRAME_RELATED_P (insn) = 1;
+           }
+       }
+    }
+  else
+    frame_size = -1;
+
+  if (offset > 0)
+    {
+      /* If the frame pointer is needed, save the frame pointer and lr
+        first.  Make the frame pointer point to the location of the
+        old frame pointer on the stack.  */
+      if (frame_pointer_needed)
+       {
+         rtx mem_fp, mem_lr;
+
+         if (fp_offset)
+           {
+             insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
+                                              GEN_INT (-offset)));
+             RTX_FRAME_RELATED_P (insn) = 1;
+             aarch64_set_frame_expr (gen_rtx_SET
+                                     (Pmode, stack_pointer_rtx,
+                                      gen_rtx_MINUS (Pmode,
+                                                     stack_pointer_rtx,
+                                                     GEN_INT (offset))));
+             mem_fp = gen_frame_mem (DImode,
+                                     plus_constant (Pmode,
+                                                    stack_pointer_rtx,
+                                                    fp_offset));
+             mem_lr = gen_frame_mem (DImode,
+                                     plus_constant (Pmode,
+                                                    stack_pointer_rtx,
+                                                    fp_offset
+                                                    + UNITS_PER_WORD));
+             insn = emit_insn (gen_store_pairdi (mem_fp,
+                                                 hard_frame_pointer_rtx,
+                                                 mem_lr,
+                                                 gen_rtx_REG (DImode,
+                                                              LR_REGNUM)));
+           }
+         else
+           {
+             insn = emit_insn (gen_storewb_pairdi_di
+                               (stack_pointer_rtx, stack_pointer_rtx,
+                                hard_frame_pointer_rtx,
+                                gen_rtx_REG (DImode, LR_REGNUM),
+                                GEN_INT (-offset),
+                                GEN_INT (GET_MODE_SIZE (DImode) - offset)));
+             RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 2)) = 1;
+           }
+
+         /* The first part of a frame-related parallel insn is always
+            assumed to be relevant to the frame calculations;
+            subsequent parts are only frame-related if explicitly
+            marked.  */
+         RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
+         RTX_FRAME_RELATED_P (insn) = 1;
+
+         /* Set up frame pointer to point to the location of the
+            previous frame pointer on the stack.  */
+         insn = emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
+                                          stack_pointer_rtx,
+                                          GEN_INT (fp_offset)));
+         aarch64_set_frame_expr (gen_rtx_SET
+                                 (Pmode, hard_frame_pointer_rtx,
+                                  gen_rtx_PLUS (Pmode,
+                                                stack_pointer_rtx,
+                                                GEN_INT (fp_offset))));
+         RTX_FRAME_RELATED_P (insn) = 1;
+         insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
+                                          hard_frame_pointer_rtx));
+       }
+      else
+       {
+         insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
+                                          GEN_INT (-offset)));
+         RTX_FRAME_RELATED_P (insn) = 1;
+       }
+
+      aarch64_save_or_restore_callee_save_registers
+       (fp_offset + cfun->machine->frame.hardfp_offset, 0);
+    }
+
+  /* When offset >= 512,
+     sub sp, sp, #<outgoing_args_size> */
+  if (frame_size > -1)
+    {
+      if (crtl->outgoing_args_size > 0)
+       {
+         insn = emit_insn (gen_add2_insn
+                           (stack_pointer_rtx,
+                            GEN_INT (- crtl->outgoing_args_size)));
+         RTX_FRAME_RELATED_P (insn) = 1;
+       }
+    }
+}
+
+/* Generate the epilogue instructions for returning from a function.  */
+void
+aarch64_expand_epilogue (bool for_sibcall)
+{
+  HOST_WIDE_INT original_frame_size, frame_size, offset;
+  HOST_WIDE_INT fp_offset;
+  rtx insn;
+
+  aarch64_layout_frame ();
+  original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size;
+  frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size
+               + crtl->outgoing_args_size);
+  offset = frame_size = AARCH64_ROUND_UP (frame_size,
+                                         STACK_BOUNDARY / BITS_PER_UNIT);
+
+  fp_offset = (offset
+              - original_frame_size
+              - cfun->machine->frame.saved_regs_size);
+
+  /* Store pairs and load pairs have a range only of +/- 512.  */
+  if (offset >= 512)
+    {
+      offset = original_frame_size + cfun->machine->frame.saved_regs_size;
+      if (offset >= 512)
+       offset = cfun->machine->frame.saved_regs_size;
+
+      frame_size -= (offset + crtl->outgoing_args_size);
+      fp_offset = 0;
+      if (!frame_pointer_needed && crtl->outgoing_args_size > 0)
+       {
+         insn = emit_insn (gen_add2_insn
+                           (stack_pointer_rtx,
+                            GEN_INT (crtl->outgoing_args_size)));
+         RTX_FRAME_RELATED_P (insn) = 1;
+       }
+    }
+  else
+    frame_size = -1;
+
+  /* If there were outgoing arguments or we've done dynamic stack
+     allocation, then restore the stack pointer from the frame
+     pointer.  This is at most one insn and more efficient than using
+     GCC's internal mechanism.  */
+  if (frame_pointer_needed
+      && (crtl->outgoing_args_size || cfun->calls_alloca))
+    {
+      insn = emit_insn (gen_add3_insn (stack_pointer_rtx,
+                                      hard_frame_pointer_rtx,
+                                      GEN_INT (- fp_offset)));
+      RTX_FRAME_RELATED_P (insn) = 1;
+    }
+
+  aarch64_save_or_restore_callee_save_registers
+    (fp_offset + cfun->machine->frame.hardfp_offset, 1);
+
+  /* Restore the frame pointer and lr if the frame pointer is needed.  */
+  if (offset > 0)
+    {
+      if (frame_pointer_needed)
+       {
+         rtx mem_fp, mem_lr;
+
+         if (fp_offset)
+           {
+             mem_fp = gen_frame_mem (DImode,
+                                     plus_constant (Pmode,
+                                                    stack_pointer_rtx,
+                                                    fp_offset));
+             mem_lr = gen_frame_mem (DImode,
+                                     plus_constant (Pmode,
+                                                    stack_pointer_rtx,
+                                                    fp_offset
+                                                    + UNITS_PER_WORD));
+             insn = emit_insn (gen_load_pairdi (hard_frame_pointer_rtx,
+                                                mem_fp,
+                                                gen_rtx_REG (DImode,
+                                                             LR_REGNUM),
+                                                mem_lr));
+           }
+         else
+           {
+             insn = emit_insn (gen_loadwb_pairdi_di
+                               (stack_pointer_rtx,
+                                stack_pointer_rtx,
+                                hard_frame_pointer_rtx,
+                                gen_rtx_REG (DImode, LR_REGNUM),
+                                GEN_INT (offset),
+                                GEN_INT (GET_MODE_SIZE (DImode) + offset)));
+             RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 2)) = 1;
+             aarch64_set_frame_expr (gen_rtx_SET
+                                     (Pmode,
+                                      stack_pointer_rtx,
+                                      gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+                                                    GEN_INT (offset))));
+           }
+
+         /* The first part of a frame-related parallel insn
+            is always assumed to be relevant to the frame
+            calculations; subsequent parts are only
+            frame-related if explicitly marked.  */
+         RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
+         RTX_FRAME_RELATED_P (insn) = 1;
+         add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
+         add_reg_note (insn, REG_CFA_RESTORE,
+                       gen_rtx_REG (DImode, LR_REGNUM));
+
+         if (fp_offset)
+           {
+             insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
+                                              GEN_INT (offset)));
+             RTX_FRAME_RELATED_P (insn) = 1;
+           }
+       }
+
+      else
+       {
+         insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
+                                          GEN_INT (offset)));
+         RTX_FRAME_RELATED_P (insn) = 1;
+       }
+    }
+
+  /* Stack adjustment for exception handler.  */
+  if (crtl->calls_eh_return)
+    {
+      /* We need to unwind the stack by the offset computed by
+        EH_RETURN_STACKADJ_RTX.  However, at this point the CFA is
+        based on SP.  Ideally we would update the SP and define the
+        CFA along the lines of:
+
+        SP = SP + EH_RETURN_STACKADJ_RTX
+        (regnote CFA = SP - EH_RETURN_STACKADJ_RTX)
+
+        However the dwarf emitter only understands a constant
+        register offset.
+
+        The solution chosen here is to use the otherwise unused IP0
+        as a temporary register to hold the current SP value.  The
+        CFA is described using IP0 then SP is modified.  */
+
+      rtx ip0 = gen_rtx_REG (DImode, IP0_REGNUM);
+
+      insn = emit_move_insn (ip0, stack_pointer_rtx);
+      add_reg_note (insn, REG_CFA_DEF_CFA, ip0);
+      RTX_FRAME_RELATED_P (insn) = 1;
+
+      emit_insn (gen_add2_insn (stack_pointer_rtx, EH_RETURN_STACKADJ_RTX));
+
+      /* Ensure the assignment to IP0 does not get optimized away.  */
+      emit_use (ip0);
+    }
+
+  if (frame_size > -1)
+    {
+      if (frame_size >= 0x1000000)
+       {
+         rtx op0 = gen_rtx_REG (Pmode, IP0_REGNUM);
+         emit_move_insn (op0, GEN_INT (frame_size));
+         emit_insn (gen_add2_insn (stack_pointer_rtx, op0));
+         aarch64_set_frame_expr (gen_rtx_SET
+                                 (Pmode, stack_pointer_rtx,
+                                  gen_rtx_PLUS (Pmode,
+                                                stack_pointer_rtx,
+                                                GEN_INT (frame_size))));
+       }
+      else if (frame_size > 0)
+       {
+         if ((frame_size & 0xfff) != 0)
+           {
+             insn = emit_insn (gen_add2_insn
+                               (stack_pointer_rtx,
+                                GEN_INT ((frame_size
+                                          & (HOST_WIDE_INT) 0xfff))));
+             RTX_FRAME_RELATED_P (insn) = 1;
+           }
+         if ((frame_size & 0xfff) != frame_size)
+           {
+             insn = emit_insn (gen_add2_insn
+                               (stack_pointer_rtx,
+                                GEN_INT ((frame_size
+                                          & ~ (HOST_WIDE_INT) 0xfff))));
+             RTX_FRAME_RELATED_P (insn) = 1;
+           }
+       }
+
+      aarch64_set_frame_expr (gen_rtx_SET (Pmode, stack_pointer_rtx,
+                                          gen_rtx_PLUS (Pmode,
+                                                        stack_pointer_rtx,
+                                                        GEN_INT (offset))));
+    }
+
+  emit_use (gen_rtx_REG (DImode, LR_REGNUM));
+  if (!for_sibcall)
+    emit_jump_insn (ret_rtx);
+}
+
+/* Return the place to copy the exception unwinding return address to.
+   This will probably be a stack slot, but could (in theory) be the
+   return register.  */
+rtx
+aarch64_final_eh_return_addr (void)
+{
+  HOST_WIDE_INT original_frame_size, frame_size, offset, fp_offset;
+  aarch64_layout_frame ();
+  original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size;
+  frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size
+               + crtl->outgoing_args_size);
+  offset = frame_size = AARCH64_ROUND_UP (frame_size,
+                                         STACK_BOUNDARY / BITS_PER_UNIT);
+  fp_offset = offset
+    - original_frame_size
+    - cfun->machine->frame.saved_regs_size;
+
+  if (cfun->machine->frame.reg_offset[LR_REGNUM] < 0)
+    return gen_rtx_REG (DImode, LR_REGNUM);
+
+  /* DSE and CSELIB do not detect an alias between sp+k1 and fp+k2.  This can
+     result in a store to save LR introduced by builtin_eh_return () being
+     incorrectly deleted because the alias is not detected.
+     So, when calculating the address to which to copy the exception
+     unwinding return address, we distinguish two cases.
+     If FP is needed and the fp_offset is 0, it means that SP = FP and hence
+     we return a SP-relative location since all the addresses are SP-relative
+     in this case.  This prevents the store from being optimized away.
+     If the fp_offset is not 0, then the addresses will be FP-relative and
+     therefore we return a FP-relative location.  */
+
+  if (frame_pointer_needed)
+    {
+      if (fp_offset)
+        return gen_frame_mem (DImode,
+                             plus_constant (Pmode, hard_frame_pointer_rtx, UNITS_PER_WORD));
+      else
+        return gen_frame_mem (DImode,
+                             plus_constant (Pmode, stack_pointer_rtx, UNITS_PER_WORD));
+    }
+
+  /* If FP is not needed, we calculate the location of LR, which would be
+     at the top of the saved registers block.  */
+
+  return gen_frame_mem (DImode,
+                       plus_constant (Pmode,
+                                      stack_pointer_rtx,
+                                      fp_offset
+                                      + cfun->machine->frame.saved_regs_size
+                                      - 2 * UNITS_PER_WORD));
+}
+
+/* Output code to build up a constant in a register.  */
+static void
+aarch64_build_constant (FILE *file,
+                       int regnum,
+                       HOST_WIDE_INT val)
+{
+  if (aarch64_bitmask_imm (val, DImode))
+    asm_fprintf (file, "\tmovi\t%r, %wd\n", regnum, val);
+  else
+    {
+      int i;
+      int ncount = 0;
+      int zcount = 0;
+      HOST_WIDE_INT valp = val >> 16;
+      HOST_WIDE_INT valm;
+      HOST_WIDE_INT tval;
+
+      for (i = 16; i < 64; i += 16)
+       {
+         valm = (valp & 0xffff);
+
+         if (valm != 0)
+           ++ zcount;
+
+         if (valm != 0xffff)
+           ++ ncount;
+
+         valp >>= 16;
+       }
+
+      /* zcount contains the number of additional MOVK instructions
+        required if the constant is built up with an initial MOVZ instruction,
+        while ncount is the number of MOVK instructions required if starting
+        with a MOVN instruction.  Choose whichever sequence needs fewer
+        instructions, preferring the MOVZ sequence when the counts are
+        equal.  */
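+      /* For instance, for the constant 0x0000ffffffff1234 the upper
+        halfwords are 0xffff, 0xffff and 0x0000, so zcount == 2 and
+        ncount == 1: a MOVN plus one MOVK beats a MOVZ plus two MOVKs.  */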
+      if (ncount < zcount)
+       {
+         asm_fprintf (file, "\tmovn\t%r, %wd\n", regnum, (~val) & 0xffff);
+         tval = 0xffff;
+       }
+      else
+       {
+         asm_fprintf (file, "\tmovz\t%r, %wd\n", regnum, val & 0xffff);
+         tval = 0;
+       }
+
+      val >>= 16;
+
+      for (i = 16; i < 64; i += 16)
+       {
+         if ((val & 0xffff) != tval)
+           asm_fprintf (file, "\tmovk\t%r, %wd, lsl %d\n",
+                        regnum, val & 0xffff, i);
+         val >>= 16;
+       }
+    }
+}
+
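+/* Output code to add DELTA to register REGNUM, using SCRATCHREG when the
+   addend cannot be expressed directly.  A delta below 2^24 is emitted as
+   at most two ADD/SUB immediates: for example, DELTA == 0x123456 becomes
+   an add of 0x123 shifted left by 12 followed by an add of 0x456.
+   Anything larger is first built in SCRATCHREG with
+   aarch64_build_constant and then added with a register-register ADD.  */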
+static void
+aarch64_add_constant (FILE *file, int regnum, int scratchreg,
+                            HOST_WIDE_INT delta)
+{
+  HOST_WIDE_INT mdelta = delta;
+
+  if (mdelta < 0)
+    mdelta = -mdelta;
+
+  if (mdelta >= 4096 * 4096)
+    {
+      aarch64_build_constant (file, scratchreg, delta);
+      asm_fprintf (file, "\tadd\t%r, %r, %r\n", regnum, regnum,
+                  scratchreg);
+    }
+  else if (mdelta > 0)
+    {
+      const char *const mi_op = delta < 0 ? "sub" : "add";
+
+      if (mdelta >= 4096)
+       asm_fprintf (file, "\t%s\t%r, %r, %wd, lsl 12\n", mi_op, regnum, regnum,
+                    mdelta / 4096);
+
+      if (mdelta % 4096 != 0)
+       asm_fprintf (file, "\t%s\t%r, %r, %wd\n", mi_op, regnum, regnum,
+                    mdelta % 4096);
+    }
+}
+
+/* Output code to add DELTA to the first argument, and then jump
+   to FUNCTION.  Used for C++ multiple inheritance.  */
+static void
+aarch64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
+                        HOST_WIDE_INT delta,
+                        HOST_WIDE_INT vcall_offset,
+                        tree function)
+{
+  /* The this pointer is always in x0.  Note that this differs from
+     Arm where the this pointer may be bumped to r1 if r0 is required
+     to return a pointer to an aggregate.  On AArch64 a result value
+     pointer will be in x8.  */
+  int this_regno = R0_REGNUM;
+
+  /* Make sure unwind info is emitted for the thunk if needed.  */
+  final_start_function (emit_barrier (), file, 1);
+
+  if (vcall_offset == 0)
+    aarch64_add_constant (file, this_regno, IP1_REGNUM, delta);
+  else
+    {
+      gcc_assert ((vcall_offset & 0x7) == 0);
+
+      if (delta == 0)
+       asm_fprintf (file, "\tldr\t%r, [%r]\n", IP0_REGNUM, this_regno);
+      else if (delta >= -256 && delta < 256)
+       asm_fprintf (file, "\tldr\t%r, [%r,%wd]!\n", IP0_REGNUM, this_regno,
+                    delta);
+      else
+       {
+         aarch64_add_constant (file, this_regno, IP1_REGNUM, delta);
+
+         asm_fprintf (file, "\tldr\t%r, [%r]\n", IP0_REGNUM, this_regno);
+       }
+
+      if (vcall_offset >= -256 && vcall_offset < 32768)
+       asm_fprintf (file, "\tldr\t%r, [%r,%wd]\n", IP1_REGNUM, IP0_REGNUM,
+                    vcall_offset);
+      else
+       {
+         aarch64_build_constant (file, IP1_REGNUM, vcall_offset);
+         asm_fprintf (file, "\tldr\t%r, [%r,%r]\n", IP1_REGNUM, IP0_REGNUM,
+                      IP1_REGNUM);
+       }
+
+      asm_fprintf (file, "\tadd\t%r, %r, %r\n", this_regno, this_regno,
+                  IP1_REGNUM);
+    }
+
+  output_asm_insn ("b\t%a0", &XEXP (DECL_RTL (function), 0));
+  final_end_function ();
+}
+
+\f
+static int
+aarch64_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
+{
+  if (GET_CODE (*x) == SYMBOL_REF)
+    return SYMBOL_REF_TLS_MODEL (*x) != 0;
+
+  /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
+     TLS offsets, not real symbol references.  */
+  if (GET_CODE (*x) == UNSPEC
+      && XINT (*x, 1) == UNSPEC_TLS)
+    return -1;
+
+  return 0;
+}
+
+static bool
+aarch64_tls_referenced_p (rtx x)
+{
+  if (!TARGET_HAVE_TLS)
+    return false;
+
+  return for_each_rtx (&x, aarch64_tls_operand_p_1, NULL);
+}
+
+
+static int
+aarch64_bitmasks_cmp (const void *i1, const void *i2)
+{
+  const unsigned HOST_WIDE_INT *imm1 = (const unsigned HOST_WIDE_INT *) i1;
+  const unsigned HOST_WIDE_INT *imm2 = (const unsigned HOST_WIDE_INT *) i2;
+
+  if (*imm1 < *imm2)
+    return -1;
+  if (*imm1 > *imm2)
+    return +1;
+  return 0;
+}
+
+
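+/* Populate aarch64_bitmasks with every value encodable as a logical
+   (bitmask) immediate: for each element size E in {2, 4, ..., 64}, each
+   run length S (1 <= S < E) and each rotation R, rotate a run of S ones
+   right by R within E bits and replicate it across 64 bits (the switch
+   below deliberately falls through to widen the replication).  For
+   instance E == 8, S == 3, R == 1 gives 0x83, replicated to
+   0x8383838383838383.  */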
+static void
+aarch64_build_bitmask_table (void)
+{
+  unsigned HOST_WIDE_INT mask, imm;
+  unsigned int log_e, e, s, r;
+  unsigned int nimms = 0;
+
+  for (log_e = 1; log_e <= 6; log_e++)
+    {
+      e = 1 << log_e;
+      if (e == 64)
+       mask = ~(HOST_WIDE_INT) 0;
+      else
+       mask = ((HOST_WIDE_INT) 1 << e) - 1;
+      for (s = 1; s < e; s++)
+       {
+         for (r = 0; r < e; r++)
+           {
+             /* Set S consecutive bits to 1 (S < 64).  */
+             imm = ((unsigned HOST_WIDE_INT)1 << s) - 1;
+             /* Rotate right by R.  */
+             if (r != 0)
+               imm = ((imm >> r) | (imm << (e - r))) & mask;
+             /* Replicate the constant depending on SIMD size.  */
+             switch (log_e) {
+             case 1: imm |= (imm <<  2);
+             case 2: imm |= (imm <<  4);
+             case 3: imm |= (imm <<  8);
+             case 4: imm |= (imm << 16);
+             case 5: imm |= (imm << 32);
+             case 6:
+               break;
+             default:
+               gcc_unreachable ();
+             }
+             gcc_assert (nimms < AARCH64_NUM_BITMASKS);
+             aarch64_bitmasks[nimms++] = imm;
+           }
+       }
+    }
+
+  gcc_assert (nimms == AARCH64_NUM_BITMASKS);
+  qsort (aarch64_bitmasks, nimms, sizeof (aarch64_bitmasks[0]),
+        aarch64_bitmasks_cmp);
+}
+
+
+/* Return true if val can be encoded as a 12-bit unsigned immediate with
+   a left shift of 0 or 12 bits.  */
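+/* For example, 0xabc and 0xabc000 are accepted, while 0xabc00 is not
+   (it straddles the two 12-bit fields).  */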
+bool
+aarch64_uimm12_shift (HOST_WIDE_INT val)
+{
+  return ((val & (((HOST_WIDE_INT) 0xfff) << 0)) == val
+         || (val & (((HOST_WIDE_INT) 0xfff) << 12)) == val
+         );
+}
+
+
+/* Return true if val is an immediate that can be loaded into a
+   register by a MOVZ instruction.  */
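+/* For example, 0x2345 and 0x23450000 qualify, while 0x12345 does not
+   (it spans two 16-bit halfwords).  */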
+static bool
+aarch64_movw_imm (HOST_WIDE_INT val, enum machine_mode mode)
+{
+  if (GET_MODE_SIZE (mode) > 4)
+    {
+      if ((val & (((HOST_WIDE_INT) 0xffff) << 32)) == val
+         || (val & (((HOST_WIDE_INT) 0xffff) << 48)) == val)
+       return 1;
+    }
+  else
+    {
+      /* Ignore sign extension.  */
+      val &= (HOST_WIDE_INT) 0xffffffff;
+    }
+  return ((val & (((HOST_WIDE_INT) 0xffff) << 0)) == val
+         || (val & (((HOST_WIDE_INT) 0xffff) << 16)) == val);
+}
+
+
+/* Return true if val is a valid bitmask immediate.  */
+bool
+aarch64_bitmask_imm (HOST_WIDE_INT val, enum machine_mode mode)
+{
+  if (GET_MODE_SIZE (mode) < 8)
+    {
+      /* Replicate bit pattern.  */
+      val &= (HOST_WIDE_INT) 0xffffffff;
+      val |= val << 32;
+    }
+  return bsearch (&val, aarch64_bitmasks, AARCH64_NUM_BITMASKS,
+                 sizeof (aarch64_bitmasks[0]), aarch64_bitmasks_cmp) != NULL;
+}
+
+
+/* Return true if val is an immediate that can be loaded into a
+   register in a single instruction.  */
+bool
+aarch64_move_imm (HOST_WIDE_INT val, enum machine_mode mode)
+{
+  if (aarch64_movw_imm (val, mode) || aarch64_movw_imm (~val, mode))
+    return 1;
+  return aarch64_bitmask_imm (val, mode);
+}
+
+static bool
+aarch64_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
+{
+  rtx base, offset;
+  if (GET_CODE (x) == HIGH)
+    return true;
+
+  split_const (x, &base, &offset);
+  if (GET_CODE (base) == SYMBOL_REF || GET_CODE (base) == LABEL_REF)
+    return (aarch64_classify_symbol (base, SYMBOL_CONTEXT_ADR) != SYMBOL_FORCE_TO_MEM);
+
+  return aarch64_tls_referenced_p (x);
+}
+
+/* Return true if register REGNO is a valid index register.
+   STRICT_P is true if REG_OK_STRICT is in effect.  */
+
+bool
+aarch64_regno_ok_for_index_p (int regno, bool strict_p)
+{
+  if (!HARD_REGISTER_NUM_P (regno))
+    {
+      if (!strict_p)
+       return true;
+
+      if (!reg_renumber)
+       return false;
+
+      regno = reg_renumber[regno];
+    }
+  return GP_REGNUM_P (regno);
+}
+
+/* Return true if register REGNO is a valid base register.
+   STRICT_P is true if REG_OK_STRICT is in effect.  */
+
+bool
+aarch64_regno_ok_for_base_p (int regno, bool strict_p)
+{
+  if (!HARD_REGISTER_NUM_P (regno))
+    {
+      if (!strict_p)
+       return true;
+
+      if (!reg_renumber)
+       return false;
+
+      regno = reg_renumber[regno];
+    }
+
+  /* The fake registers will be eliminated to either the stack or
+     hard frame pointer, both of which are usually valid base registers.
+     Reload deals with the cases where the eliminated form isn't valid.  */
+  return (GP_REGNUM_P (regno)
+         || regno == SP_REGNUM
+         || regno == FRAME_POINTER_REGNUM
+         || regno == ARG_POINTER_REGNUM);
+}
+
+/* Return true if X is a valid base register.
+   STRICT_P is true if REG_OK_STRICT is in effect.  */
+
+static bool
+aarch64_base_register_rtx_p (rtx x, bool strict_p)
+{
+  if (!strict_p && GET_CODE (x) == SUBREG)
+    x = SUBREG_REG (x);
+
+  return (REG_P (x) && aarch64_regno_ok_for_base_p (REGNO (x), strict_p));
+}
+
+/* Return true if address offset is a valid index.  If it is, fill in INFO
+   appropriately.  STRICT_P is true if REG_OK_STRICT is in effect.  */
+
+static bool
+aarch64_classify_index (struct aarch64_address_info *info, rtx x,
+                       enum machine_mode mode, bool strict_p)
+{
+  enum aarch64_address_type type;
+  rtx index;
+  int shift;
+
+  /* (reg:P) */
+  if ((REG_P (x) || GET_CODE (x) == SUBREG)
+      && GET_MODE (x) == Pmode)
+    {
+      type = ADDRESS_REG_REG;
+      index = x;
+      shift = 0;
+    }
+  /* (sign_extend:DI (reg:SI)) */
+  else if ((GET_CODE (x) == SIGN_EXTEND
+           || GET_CODE (x) == ZERO_EXTEND)
+          && GET_MODE (x) == DImode
+          && GET_MODE (XEXP (x, 0)) == SImode)
+    {
+      type = (GET_CODE (x) == SIGN_EXTEND)
+       ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
+      index = XEXP (x, 0);
+      shift = 0;
+    }
+  /* (mult:DI (sign_extend:DI (reg:SI)) (const_int scale)) */
+  else if (GET_CODE (x) == MULT
+          && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
+              || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
+          && GET_MODE (XEXP (x, 0)) == DImode
+          && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode
+          && CONST_INT_P (XEXP (x, 1)))
+    {
+      type = (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
+       ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
+      index = XEXP (XEXP (x, 0), 0);
+      shift = exact_log2 (INTVAL (XEXP (x, 1)));
+    }
+  /* (ashift:DI (sign_extend:DI (reg:SI)) (const_int shift)) */
+  else if (GET_CODE (x) == ASHIFT
+          && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
+              || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
+          && GET_MODE (XEXP (x, 0)) == DImode
+          && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode
+          && CONST_INT_P (XEXP (x, 1)))
+    {
+      type = (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
+       ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
+      index = XEXP (XEXP (x, 0), 0);
+      shift = INTVAL (XEXP (x, 1));
+    }
+  /* (sign_extract:DI (mult:DI (reg:DI) (const_int scale)) 32+shift 0) */
+  else if ((GET_CODE (x) == SIGN_EXTRACT
+           || GET_CODE (x) == ZERO_EXTRACT)
+          && GET_MODE (x) == DImode
+          && GET_CODE (XEXP (x, 0)) == MULT
+          && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
+          && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
+    {
+      type = (GET_CODE (x) == SIGN_EXTRACT)
+       ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
+      index = XEXP (XEXP (x, 0), 0);
+      shift = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)));
+      if (INTVAL (XEXP (x, 1)) != 32 + shift
+         || INTVAL (XEXP (x, 2)) != 0)
+       shift = -1;
+    }
+  /* (and:DI (mult:DI (reg:DI) (const_int scale))
+     (const_int 0xffffffff<<shift)) */
+  else if (GET_CODE (x) == AND
+          && GET_MODE (x) == DImode
+          && GET_CODE (XEXP (x, 0)) == MULT
+          && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
+          && CONST_INT_P (XEXP (XEXP (x, 0), 1))
+          && CONST_INT_P (XEXP (x, 1)))
+    {
+      type = ADDRESS_REG_UXTW;
+      index = XEXP (XEXP (x, 0), 0);
+      shift = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)));
+      if (INTVAL (XEXP (x, 1)) != (HOST_WIDE_INT)0xffffffff << shift)
+       shift = -1;
+    }
+  /* (sign_extract:DI (ashift:DI (reg:DI) (const_int shift)) 32+shift 0) */
+  else if ((GET_CODE (x) == SIGN_EXTRACT
+           || GET_CODE (x) == ZERO_EXTRACT)
+          && GET_MODE (x) == DImode
+          && GET_CODE (XEXP (x, 0)) == ASHIFT
+          && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
+          && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
+    {
+      type = (GET_CODE (x) == SIGN_EXTRACT)
+       ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
+      index = XEXP (XEXP (x, 0), 0);
+      shift = INTVAL (XEXP (XEXP (x, 0), 1));
+      if (INTVAL (XEXP (x, 1)) != 32 + shift
+         || INTVAL (XEXP (x, 2)) != 0)
+       shift = -1;
+    }
+  /* (and:DI (ashift:DI (reg:DI) (const_int shift))
+     (const_int 0xffffffff<<shift)) */
+  else if (GET_CODE (x) == AND
+          && GET_MODE (x) == DImode
+          && GET_CODE (XEXP (x, 0)) == ASHIFT
+          && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
+          && CONST_INT_P (XEXP (XEXP (x, 0), 1))
+          && CONST_INT_P (XEXP (x, 1)))
+    {
+      type = ADDRESS_REG_UXTW;
+      index = XEXP (XEXP (x, 0), 0);
+      shift = INTVAL (XEXP (XEXP (x, 0), 1));
+      if (INTVAL (XEXP (x, 1)) != (HOST_WIDE_INT)0xffffffff << shift)
+       shift = -1;
+    }
+  /* (mult:P (reg:P) (const_int scale)) */
+  else if (GET_CODE (x) == MULT
+          && GET_MODE (x) == Pmode
+          && GET_MODE (XEXP (x, 0)) == Pmode
+          && CONST_INT_P (XEXP (x, 1)))
+    {
+      type = ADDRESS_REG_REG;
+      index = XEXP (x, 0);
+      shift = exact_log2 (INTVAL (XEXP (x, 1)));
+    }
+  /* (ashift:P (reg:P) (const_int shift)) */
+  else if (GET_CODE (x) == ASHIFT
+          && GET_MODE (x) == Pmode
+          && GET_MODE (XEXP (x, 0)) == Pmode
+          && CONST_INT_P (XEXP (x, 1)))
+    {
+      type = ADDRESS_REG_REG;
+      index = XEXP (x, 0);
+      shift = INTVAL (XEXP (x, 1));
+    }
+  else
+    return false;
+
+  if (GET_CODE (index) == SUBREG)
+    index = SUBREG_REG (index);
+
+  if ((shift == 0 ||
+       (shift > 0 && shift <= 3
+       && (1 << shift) == GET_MODE_SIZE (mode)))
+      && REG_P (index)
+      && aarch64_regno_ok_for_index_p (REGNO (index), strict_p))
+    {
+      info->type = type;
+      info->offset = index;
+      info->shift = shift;
+      return true;
+    }
+
+  return false;
+}
+
+static inline bool
+offset_7bit_signed_scaled_p (enum machine_mode mode, HOST_WIDE_INT offset)
+{
+  return (offset >= -64 * GET_MODE_SIZE (mode)
+         && offset < 64 * GET_MODE_SIZE (mode)
+         && offset % GET_MODE_SIZE (mode) == 0);
+}
+
+static inline bool
+offset_9bit_signed_unscaled_p (enum machine_mode mode ATTRIBUTE_UNUSED,
+                              HOST_WIDE_INT offset)
+{
+  return offset >= -256 && offset < 256;
+}
+
+static inline bool
+offset_12bit_unsigned_scaled_p (enum machine_mode mode, HOST_WIDE_INT offset)
+{
+  return (offset >= 0
+         && offset < 4096 * GET_MODE_SIZE (mode)
+         && offset % GET_MODE_SIZE (mode) == 0);
+}
+
+/* Return true if X is a valid address for machine mode MODE.  If it is,
+   fill in INFO appropriately.  STRICT_P is true if REG_OK_STRICT is in
+   effect.  OUTER_CODE is PARALLEL for a load/store pair.  */
+
+static bool
+aarch64_classify_address (struct aarch64_address_info *info,
+                         rtx x, enum machine_mode mode,
+                         RTX_CODE outer_code, bool strict_p)
+{
+  enum rtx_code code = GET_CODE (x);
+  rtx op0, op1;
+  bool allow_reg_index_p =
+    outer_code != PARALLEL && GET_MODE_SIZE(mode) != 16;
+
+  /* Don't support anything other than POST_INC or REG addressing for
+     AdvSIMD.  */
+  if (aarch64_vector_mode_p (mode)
+      && (code != POST_INC && code != REG))
+    return false;
+
+  switch (code)
+    {
+    case REG:
+    case SUBREG:
+      info->type = ADDRESS_REG_IMM;
+      info->base = x;
+      info->offset = const0_rtx;
+      return aarch64_base_register_rtx_p (x, strict_p);
+
+    case PLUS:
+      op0 = XEXP (x, 0);
+      op1 = XEXP (x, 1);
+      if (GET_MODE_SIZE (mode) != 0
+         && CONST_INT_P (op1)
+         && aarch64_base_register_rtx_p (op0, strict_p))
+       {
+         HOST_WIDE_INT offset = INTVAL (op1);
+
+         info->type = ADDRESS_REG_IMM;
+         info->base = op0;
+         info->offset = op1;
+
+         /* TImode and TFmode values are allowed in both pairs of X
+            registers and individual Q registers.  The available
+            address modes are:
+            X,X: 7-bit signed scaled offset
+            Q:   9-bit signed offset
+            We conservatively require an offset representable in either mode.
+          */
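+         /* For TImode, for example, this accepts only multiples of 16
+            in the range [-256, 256).  */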
+         if (mode == TImode || mode == TFmode)
+           return (offset_7bit_signed_scaled_p (mode, offset)
+                   && offset_9bit_signed_unscaled_p (mode, offset));
+
+         if (outer_code == PARALLEL)
+           return ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
+                   && offset_7bit_signed_scaled_p (mode, offset));
+         else
+           return (offset_9bit_signed_unscaled_p (mode, offset)
+                   || offset_12bit_unsigned_scaled_p (mode, offset));
+       }
+
+      if (allow_reg_index_p)
+       {
+         /* Look for base + (scaled/extended) index register.  */
+         if (aarch64_base_register_rtx_p (op0, strict_p)
+             && aarch64_classify_index (info, op1, mode, strict_p))
+           {
+             info->base = op0;
+             return true;
+           }
+         if (aarch64_base_register_rtx_p (op1, strict_p)
+             && aarch64_classify_index (info, op0, mode, strict_p))
+           {
+             info->base = op1;
+             return true;
+           }
+       }
+
+      return false;
+
+    case POST_INC:
+    case POST_DEC:
+    case PRE_INC:
+    case PRE_DEC:
+      info->type = ADDRESS_REG_WB;
+      info->base = XEXP (x, 0);
+      info->offset = NULL_RTX;
+      return aarch64_base_register_rtx_p (info->base, strict_p);
+
+    case POST_MODIFY:
+    case PRE_MODIFY:
+      info->type = ADDRESS_REG_WB;
+      info->base = XEXP (x, 0);
+      if (GET_CODE (XEXP (x, 1)) == PLUS
+         && CONST_INT_P (XEXP (XEXP (x, 1), 1))
+         && rtx_equal_p (XEXP (XEXP (x, 1), 0), info->base)
+         && aarch64_base_register_rtx_p (info->base, strict_p))
+       {
+         HOST_WIDE_INT offset;
+         info->offset = XEXP (XEXP (x, 1), 1);
+         offset = INTVAL (info->offset);
+
+         /* TImode and TFmode values are allowed in both pairs of X
+            registers and individual Q registers.  The available
+            address modes are:
+            X,X: 7-bit signed scaled offset
+            Q:   9-bit signed offset
+            We conservatively require an offset representable in either mode.
+          */
+         if (mode == TImode || mode == TFmode)
+           return (offset_7bit_signed_scaled_p (mode, offset)
+                   && offset_9bit_signed_unscaled_p (mode, offset));
+
+         if (outer_code == PARALLEL)
+           return ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
+                   && offset_7bit_signed_scaled_p (mode, offset));
+         else
+           return offset_9bit_signed_unscaled_p (mode, offset);
+       }
+      return false;
+
+    case CONST:
+    case SYMBOL_REF:
+    case LABEL_REF:
+      /* load literal: pc-relative constant pool entry.  */
+      info->type = ADDRESS_SYMBOLIC;
+      if (outer_code != PARALLEL)
+       {
+         rtx sym, addend;
+
+         split_const (x, &sym, &addend);
+         return (GET_CODE (sym) == LABEL_REF
+                 || (GET_CODE (sym) == SYMBOL_REF
+                     && CONSTANT_POOL_ADDRESS_P (sym)));
+       }
+      return false;
+
+    case LO_SUM:
+      info->type = ADDRESS_LO_SUM;
+      info->base = XEXP (x, 0);
+      info->offset = XEXP (x, 1);
+      if (allow_reg_index_p
+         && aarch64_base_register_rtx_p (info->base, strict_p))
+       {
+         rtx sym, offs;
+         split_const (info->offset, &sym, &offs);
+         if (GET_CODE (sym) == SYMBOL_REF
+             && (aarch64_classify_symbol (sym, SYMBOL_CONTEXT_MEM)
+                 == SYMBOL_SMALL_ABSOLUTE))
+           {
+             /* The symbol and offset must be aligned to the access size.  */
+             unsigned int align;
+             unsigned int ref_size;
+
+             if (CONSTANT_POOL_ADDRESS_P (sym))
+               align = GET_MODE_ALIGNMENT (get_pool_mode (sym));
+             else if (TREE_CONSTANT_POOL_ADDRESS_P (sym))
+               {
+                 tree exp = SYMBOL_REF_DECL (sym);
+                 align = TYPE_ALIGN (TREE_TYPE (exp));
+                 align = CONSTANT_ALIGNMENT (exp, align);
+               }
+             else if (SYMBOL_REF_DECL (sym))
+               align = DECL_ALIGN (SYMBOL_REF_DECL (sym));
+             else
+               align = BITS_PER_UNIT;
+
+             ref_size = GET_MODE_SIZE (mode);
+             if (ref_size == 0)
+               ref_size = GET_MODE_SIZE (DImode);
+
+             return ((INTVAL (offs) & (ref_size - 1)) == 0
+                     && ((align / BITS_PER_UNIT) & (ref_size - 1)) == 0);
+           }
+       }
+      return false;
+
+    default:
+      return false;
+    }
+}
+
+bool
+aarch64_symbolic_address_p (rtx x)
+{
+  rtx offset;
+
+  split_const (x, &x, &offset);
+  return GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF;
+}
+
+/* Classify the base of symbolic expression X, given that X appears in
+   context CONTEXT.  */
+static enum aarch64_symbol_type
+aarch64_classify_symbolic_expression (rtx x, enum aarch64_symbol_context context)
+{
+  rtx offset;
+  split_const (x, &x, &offset);
+  return aarch64_classify_symbol (x, context);
+}
+
+
+/* Return TRUE if X is a legitimate address for accessing memory in
+   mode MODE.  */
+static bool
+aarch64_legitimate_address_hook_p (enum machine_mode mode, rtx x, bool strict_p)
+{
+  struct aarch64_address_info addr;
+
+  return aarch64_classify_address (&addr, x, mode, MEM, strict_p);
+}
+
+/* Return TRUE if X is a legitimate address for accessing memory in
+   mode MODE.  OUTER_CODE will be PARALLEL if this is a load/store
+   pair operation.  */
+bool
+aarch64_legitimate_address_p (enum machine_mode mode, rtx x,
+                          RTX_CODE outer_code, bool strict_p)
+{
+  struct aarch64_address_info addr;
+
+  return aarch64_classify_address (&addr, x, mode, outer_code, strict_p);
+}
+
+/* Return TRUE if rtx X is immediate constant 0.0.  */
+bool
+aarch64_const_double_zero_rtx_p (rtx x)
+{
+  REAL_VALUE_TYPE r;
+
+  if (GET_MODE (x) == VOIDmode)
+    return false;
+
+  REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+  if (REAL_VALUE_MINUS_ZERO (r))
+    return !HONOR_SIGNED_ZEROS (GET_MODE (x));
+  return REAL_VALUES_EQUAL (r, dconst0);
+}
+
+enum machine_mode
+aarch64_select_cc_mode (RTX_CODE code, rtx x, rtx y)
+{
+  /* Floating point comparisons that must signal on a quiet NaN (the
+     ordered relational comparisons LT, LE, GT and GE) return CCFPEmode;
+     all other floating point comparisons return CCFPmode.  */
+  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+    {
+      switch (code)
+       {
+       case EQ:
+       case NE:
+       case UNORDERED:
+       case ORDERED:
+       case UNLT:
+       case UNLE:
+       case UNGT:
+       case UNGE:
+       case UNEQ:
+       case LTGT:
+         return CCFPmode;
+
+       case LT:
+       case LE:
+       case GT:
+       case GE:
+         return CCFPEmode;
+
+       default:
+         gcc_unreachable ();
+       }
+    }
+
+  if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode)
+      && y == const0_rtx
+      && (code == EQ || code == NE || code == LT || code == GE)
+      && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS))
+    return CC_NZmode;
+
+  /* A compare with a shifted operand.  Because of canonicalization,
+     the comparison will have to be swapped when we emit the assembly
+     code.  */
+  if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode)
+      && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG)
+      && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
+         || GET_CODE (x) == LSHIFTRT
+         || GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND))
+    return CC_SWPmode;
+
+  /* A compare of a mode narrower than SI mode against zero can be done
+     by extending the value in the comparison.  */
+  if ((GET_MODE (x) == QImode || GET_MODE (x) == HImode)
+      && y == const0_rtx)
+    /* Only use sign-extension if we really need it.  */
+    return ((code == GT || code == GE || code == LE || code == LT)
+           ? CC_SESWPmode : CC_ZESWPmode);
+
+  /* For everything else, return CCmode.  */
+  return CCmode;
+}
+
+static unsigned
+aarch64_get_condition_code (rtx x)
+{
+  enum machine_mode mode = GET_MODE (XEXP (x, 0));
+  enum rtx_code comp_code = GET_CODE (x);
+
+  if (GET_MODE_CLASS (mode) != MODE_CC)
+    mode = SELECT_CC_MODE (comp_code, XEXP (x, 0), XEXP (x, 1));
+
+  switch (mode)
+    {
+    case CCFPmode:
+    case CCFPEmode:
+      switch (comp_code)
+       {
+       case GE: return AARCH64_GE;
+       case GT: return AARCH64_GT;
+       case LE: return AARCH64_LS;
+       case LT: return AARCH64_MI;
+       case NE: return AARCH64_NE;
+       case EQ: return AARCH64_EQ;
+       case ORDERED: return AARCH64_VC;
+       case UNORDERED: return AARCH64_VS;
+       case UNLT: return AARCH64_LT;
+       case UNLE: return AARCH64_LE;
+       case UNGT: return AARCH64_HI;
+       case UNGE: return AARCH64_PL;
+       default: gcc_unreachable ();
+       }
+      break;
+
+    case CCmode:
+      switch (comp_code)
+       {
+       case NE: return AARCH64_NE;
+       case EQ: return AARCH64_EQ;
+       case GE: return AARCH64_GE;
+       case GT: return AARCH64_GT;
+       case LE: return AARCH64_LE;
+       case LT: return AARCH64_LT;
+       case GEU: return AARCH64_CS;
+       case GTU: return AARCH64_HI;
+       case LEU: return AARCH64_LS;
+       case LTU: return AARCH64_CC;
+       default: gcc_unreachable ();
+       }
+      break;
+
+    case CC_SWPmode:
+    case CC_ZESWPmode:
+    case CC_SESWPmode:
+      switch (comp_code)
+       {
+       case NE: return AARCH64_NE;
+       case EQ: return AARCH64_EQ;
+       case GE: return AARCH64_LE;
+       case GT: return AARCH64_LT;
+       case LE: return AARCH64_GE;
+       case LT: return AARCH64_GT;
+       case GEU: return AARCH64_LS;
+       case GTU: return AARCH64_CC;
+       case LEU: return AARCH64_CS;
+       case LTU: return AARCH64_HI;
+       default: gcc_unreachable ();
+       }
+      break;
+
+    case CC_NZmode:
+      switch (comp_code)
+       {
+       case NE: return AARCH64_NE;
+       case EQ: return AARCH64_EQ;
+       case GE: return AARCH64_PL;
+       case LT: return AARCH64_MI;
+       default: gcc_unreachable ();
+       }
+      break;
+
+    default:
+      gcc_unreachable ();
+      break;
+    }
+}
+
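+/* Count the number of set bits in VALUE; each iteration of
+   VALUE &= VALUE - 1 clears the lowest set bit (Kernighan's method).  */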
+static unsigned
+bit_count (unsigned HOST_WIDE_INT value)
+{
+  unsigned count = 0;
+
+  while (value)
+    {
+      count++;
+      value &= value - 1;
+    }
+
+  return count;
+}
+
+void
+aarch64_print_operand (FILE *f, rtx x, char code)
+{
+  switch (code)
+    {
+    case 'e':
+      /* Print the sign/zero-extend size as a character 8->b, 16->h, 32->w.  */
+      {
+       int n;
+
+       if (GET_CODE (x) != CONST_INT
+           || (n = exact_log2 (INTVAL (x) & ~7)) <= 0)
+         {
+           output_operand_lossage ("invalid operand for '%%%c'", code);
+           return;
+         }
+
+       switch (n)
+         {
+         case 3:
+           fputc ('b', f);
+           break;
+         case 4:
+           fputc ('h', f);
+           break;
+         case 5:
+           fputc ('w', f);
+           break;
+         default:
+           output_operand_lossage ("invalid operand for '%%%c'", code);
+           return;
+         }
+      }
+      break;
+
+    case 'p':
+      {
+       int n;
+
+       /* Print N such that 2^N == X.  */
+       if (GET_CODE (x) != CONST_INT || (n = exact_log2 (INTVAL (x))) < 0)
+         {
+           output_operand_lossage ("invalid operand for '%%%c'", code);
+           return;
+         }
+
+       asm_fprintf (f, "%d", n);
+      }
+      break;
+
+    case 'P':
+      /* Print the number of non-zero bits in X (a const_int).  */
+      if (GET_CODE (x) != CONST_INT)
+       {
+         output_operand_lossage ("invalid operand for '%%%c'", code);
+         return;
+       }
+
+      asm_fprintf (f, "%u", bit_count (INTVAL (x)));
+      break;
+
+    case 'H':
+      /* Print the higher numbered register of a pair (TImode) of regs.  */
+      if (GET_CODE (x) != REG || !GP_REGNUM_P (REGNO (x) + 1))
+       {
+         output_operand_lossage ("invalid operand for '%%%c'", code);
+         return;
+       }
+
+      asm_fprintf (f, "%r", REGNO (x) + 1);
+      break;
+
+    case 'Q':
+      /* Print the least significant register of a pair (TImode) of regs.  */
+      if (GET_CODE (x) != REG || !GP_REGNUM_P (REGNO (x) + 1))
+       {
+         output_operand_lossage ("invalid operand for '%%%c'", code);
+         return;
+       }
+      asm_fprintf (f, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
+      break;
+
+    case 'R':
+      /* Print the most significant register of a pair (TImode) of regs.  */
+      if (GET_CODE (x) != REG || !GP_REGNUM_P (REGNO (x) + 1))
+       {
+         output_operand_lossage ("invalid operand for '%%%c'", code);
+         return;
+       }
+      asm_fprintf (f, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
+      break;
+
+    case 'm':
+      /* Print a condition (eq, ne, etc).  */
+
+      /* CONST_TRUE_RTX means always -- that's the default.  */
+      if (x == const_true_rtx)
+       return;
+
+      if (!COMPARISON_P (x))
+       {
+         output_operand_lossage ("invalid operand for '%%%c'", code);
+         return;
+       }
+
+      fputs (aarch64_condition_codes[aarch64_get_condition_code (x)], f);
+      break;
+
+    case 'M':
+      /* Print the inverse of a condition (eq <-> ne, etc).  */
+
+      /* CONST_TRUE_RTX means never -- that's the default.  */
+      if (x == const_true_rtx)
+       {
+         fputs ("nv", f);
+         return;
+       }
+
+      if (!COMPARISON_P (x))
+       {
+         output_operand_lossage ("invalid operand for '%%%c'", code);
+         return;
+       }
+
+      fputs (aarch64_condition_codes[AARCH64_INVERSE_CONDITION_CODE
+                                 (aarch64_get_condition_code (x))], f);
+      break;
+
+    case 'b':
+    case 'h':
+    case 's':
+    case 'd':
+    case 'q':
+      /* Print a scalar FP/SIMD register name.  */
+      if (!REG_P (x) || !FP_REGNUM_P (REGNO (x)))
+       {
+         output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code);
+         return;
+       }
+      asm_fprintf (f, "%s%c%d", REGISTER_PREFIX, code, REGNO (x) - V0_REGNUM);
+      break;
+
+    case 'S':
+    case 'T':
+    case 'U':
+    case 'V':
+      /* Print the first FP/SIMD register name in a list.  */
+      if (!REG_P (x) || !FP_REGNUM_P (REGNO (x)))
+       {
+         output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code);
+         return;
+       }
+      asm_fprintf (f, "%sv%d", REGISTER_PREFIX,
+                              REGNO (x) - V0_REGNUM + (code - 'S'));
+      break;
+
+    case 'w':
+    case 'x':
+      /* Print a general register name or the zero register (32-bit or
+         64-bit).  */
+      if (x == const0_rtx)
+       {
+         asm_fprintf (f, "%s%czr", REGISTER_PREFIX, code);
+         break;
+       }
+
+      if (REG_P (x) && GP_REGNUM_P (REGNO (x)))
+       {
+         asm_fprintf (f, "%s%c%d", REGISTER_PREFIX, code,
+                      REGNO (x) - R0_REGNUM);
+         break;
+       }
+
+      if (REG_P (x) && REGNO (x) == SP_REGNUM)
+       {
+         asm_fprintf (f, "%s%ssp", REGISTER_PREFIX, code == 'w' ? "w" : "");
+         break;
+       }
+
+      /* Fall through */
+
+    case 0:
+      /* Print a normal operand.  If it's a general register, we
+        assume DImode.  */
+      if (x == NULL)
+       {
+         output_operand_lossage ("missing operand");
+         return;
+       }
+
+      switch (GET_CODE (x))
+       {
+       case REG:
+         asm_fprintf (f, "%r", REGNO (x));
+         break;
+
+       case MEM:
+         aarch64_memory_reference_mode = GET_MODE (x);
+         output_address (XEXP (x, 0));
+         break;
+
+       case LABEL_REF:
+       case SYMBOL_REF:
+         output_addr_const (asm_out_file, x);
+         break;
+
+       case CONST_INT:
+         asm_fprintf (f, "%wd", INTVAL (x));
+         break;
+
+       case CONST_VECTOR:
+         gcc_assert (aarch64_const_vec_all_same_int_p (x, HOST_WIDE_INT_MIN,
+                                                       HOST_WIDE_INT_MAX));
+         asm_fprintf (f, "%wd", INTVAL (CONST_VECTOR_ELT (x, 0)));
+         break;
+
+       default:
+         output_operand_lossage ("invalid operand");
+         return;
+       }
+      break;
+
+    case 'A':
+      if (GET_CODE (x) == HIGH)
+       x = XEXP (x, 0);
+
+      switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR))
+       {
+       case SYMBOL_SMALL_GOT:
+         asm_fprintf (asm_out_file, ":got:");
+         break;
+
+       case SYMBOL_SMALL_TLSGD:
+         asm_fprintf (asm_out_file, ":tlsgd:");
+         break;
+
+       case SYMBOL_SMALL_TLSDESC:
+         asm_fprintf (asm_out_file, ":tlsdesc:");
+         break;
+
+       case SYMBOL_SMALL_GOTTPREL:
+         asm_fprintf (asm_out_file, ":gottprel:");
+         break;
+
+       case SYMBOL_SMALL_TPREL:
+         asm_fprintf (asm_out_file, ":tprel:");
+         break;
+
+       default:
+         break;
+       }
+      output_addr_const (asm_out_file, x);
+      break;
+
+    case 'L':
+      switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR))
+       {
+       case SYMBOL_SMALL_GOT:
+         asm_fprintf (asm_out_file, ":lo12:");
+         break;
+
+       case SYMBOL_SMALL_TLSGD:
+         asm_fprintf (asm_out_file, ":tlsgd_lo12:");
+         break;
+
+       case SYMBOL_SMALL_TLSDESC:
+         asm_fprintf (asm_out_file, ":tlsdesc_lo12:");
+         break;
+
+       case SYMBOL_SMALL_GOTTPREL:
+         asm_fprintf (asm_out_file, ":gottprel_lo12:");
+         break;
+
+       case SYMBOL_SMALL_TPREL:
+         asm_fprintf (asm_out_file, ":tprel_lo12_nc:");
+         break;
+
+       default:
+         break;
+       }
+      output_addr_const (asm_out_file, x);
+      break;
+
+    case 'G':
+
+      switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR))
+       {
+       case SYMBOL_SMALL_TPREL:
+         asm_fprintf (asm_out_file, ":tprel_hi12:");
+         break;
+       default:
+         break;
+       }
+      output_addr_const (asm_out_file, x);
+      break;
+
+    default:
+      output_operand_lossage ("invalid operand prefix '%%%c'", code);
+      return;
+    }
+}
+
+void
+aarch64_print_operand_address (FILE *f, rtx x)
+{
+  struct aarch64_address_info addr;
+
+  if (aarch64_classify_address (&addr, x, aarch64_memory_reference_mode,
+                            MEM, true))
+    switch (addr.type)
+      {
+      case ADDRESS_REG_IMM:
+       if (addr.offset == const0_rtx)
+         asm_fprintf (f, "[%r]", REGNO (addr.base));
+       else
+         asm_fprintf (f, "[%r,%wd]", REGNO (addr.base),
+                      INTVAL (addr.offset));
+       return;
+
+      case ADDRESS_REG_REG:
+       if (addr.shift == 0)
+         asm_fprintf (f, "[%r,%r]", REGNO (addr.base),
+                      REGNO (addr.offset));
+       else
+         asm_fprintf (f, "[%r,%r,lsl %u]", REGNO (addr.base),
+                      REGNO (addr.offset), addr.shift);
+       return;
+
+      case ADDRESS_REG_UXTW:
+       if (addr.shift == 0)
+         asm_fprintf (f, "[%r,w%d,uxtw]", REGNO (addr.base),
+                      REGNO (addr.offset) - R0_REGNUM);
+       else
+         asm_fprintf (f, "[%r,w%d,uxtw %u]", REGNO (addr.base),
+                      REGNO (addr.offset) - R0_REGNUM, addr.shift);
+       return;
+
+      case ADDRESS_REG_SXTW:
+       if (addr.shift == 0)
+         asm_fprintf (f, "[%r,w%d,sxtw]", REGNO (addr.base),
+                      REGNO (addr.offset) - R0_REGNUM);
+       else
+         asm_fprintf (f, "[%r,w%d,sxtw %u]", REGNO (addr.base),
+                      REGNO (addr.offset) - R0_REGNUM, addr.shift);
+       return;
+
+      case ADDRESS_REG_WB:
+       switch (GET_CODE (x))
+         {
+         case PRE_INC:
+           asm_fprintf (f, "[%r,%d]!", REGNO (addr.base),
+                        GET_MODE_SIZE (aarch64_memory_reference_mode));
+           return;
+         case POST_INC:
+           asm_fprintf (f, "[%r],%d", REGNO (addr.base),
+                        GET_MODE_SIZE (aarch64_memory_reference_mode));
+           return;
+         case PRE_DEC:
+           asm_fprintf (f, "[%r,-%d]!", REGNO (addr.base),
+                        GET_MODE_SIZE (aarch64_memory_reference_mode));
+           return;
+         case POST_DEC:
+           asm_fprintf (f, "[%r],-%d", REGNO (addr.base),
+                        GET_MODE_SIZE (aarch64_memory_reference_mode));
+           return;
+         case PRE_MODIFY:
+           asm_fprintf (f, "[%r,%wd]!", REGNO (addr.base),
+                        INTVAL (addr.offset));
+           return;
+         case POST_MODIFY:
+           asm_fprintf (f, "[%r],%wd", REGNO (addr.base),
+                        INTVAL (addr.offset));
+           return;
+         default:
+           break;
+         }
+       break;
+
+      case ADDRESS_LO_SUM:
+       asm_fprintf (f, "[%r,#:lo12:", REGNO (addr.base));
+       output_addr_const (f, addr.offset);
+       asm_fprintf (f, "]");
+       return;
+
+      case ADDRESS_SYMBOLIC:
+       break;
+      }
+
+  output_addr_const (f, x);
+}
+
+void
+aarch64_function_profiler (FILE *f ATTRIBUTE_UNUSED,
+                          int labelno ATTRIBUTE_UNUSED)
+{
+  sorry ("function profiling");
+}
+
+bool
+aarch64_label_mentioned_p (rtx x)
+{
+  const char *fmt;
+  int i;
+
+  if (GET_CODE (x) == LABEL_REF)
+    return true;
+
+  /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the
+     referencing instruction, but they are constant offsets, not
+     symbols.  */
+  if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
+    return false;
+
+  fmt = GET_RTX_FORMAT (GET_CODE (x));
+  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+    {
+      if (fmt[i] == 'E')
+       {
+         int j;
+
+         for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+           if (aarch64_label_mentioned_p (XVECEXP (x, i, j)))
+             return 1;
+       }
+      else if (fmt[i] == 'e' && aarch64_label_mentioned_p (XEXP (x, i)))
+       return 1;
+    }
+
+  return 0;
+}
+
+/* Implement REGNO_REG_CLASS.  */
+
+enum reg_class
+aarch64_regno_regclass (unsigned regno)
+{
+  if (GP_REGNUM_P (regno))
+    return CORE_REGS;
+
+  if (regno == SP_REGNUM)
+    return STACK_REG;
+
+  if (regno == FRAME_POINTER_REGNUM
+      || regno == ARG_POINTER_REGNUM)
+    return CORE_REGS;
+
+  if (FP_REGNUM_P (regno))
+    return FP_LO_REGNUM_P (regno) ?  FP_LO_REGS : FP_REGS;
+
+  return NO_REGS;
+}
+
+/* Try a machine-dependent way of reloading an illegitimate address
+   operand.  If we find one, push the reload and return the new rtx.  */
+
+rtx
+aarch64_legitimize_reload_address (rtx *x_p,
+                                  enum machine_mode mode,
+                                  int opnum, int type,
+                                  int ind_levels ATTRIBUTE_UNUSED)
+{
+  rtx x = *x_p;
+
+  /* Do not allow mem (plus (reg, const)) if vector mode.  */
+  if (aarch64_vector_mode_p (mode)
+      && GET_CODE (x) == PLUS
+      && REG_P (XEXP (x, 0))
+      && CONST_INT_P (XEXP (x, 1)))
+    {
+      rtx orig_rtx = x;
+      x = copy_rtx (x);
+      push_reload (orig_rtx, NULL_RTX, x_p, NULL,
+                  BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+                  opnum, (enum reload_type) type);
+      return x;
+    }
+
+  /* We must recognize output that we have already generated ourselves.  */
+  if (GET_CODE (x) == PLUS
+      && GET_CODE (XEXP (x, 0)) == PLUS
+      && REG_P (XEXP (XEXP (x, 0), 0))
+      && CONST_INT_P (XEXP (XEXP (x, 0), 1))
+      && CONST_INT_P (XEXP (x, 1)))
+    {
+      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+                  BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+                  opnum, (enum reload_type) type);
+      return x;
+    }
+
+  /* We wish to handle large displacements off a base register by splitting
+     the addend across an add and the mem insn.  This can cut the number of
+     extra insns needed from 3 to 1.  It is only useful for load/store of a
+     single register with 12 bit offset field.  */
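+  /* For example (for a DImode access), an offset of 0x13458 is split
+     into 0x13000, which is reloaded into the base register, and 0x458,
+     which stays in the memory reference.  */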
+  if (GET_CODE (x) == PLUS
+      && REG_P (XEXP (x, 0))
+      && CONST_INT_P (XEXP (x, 1))
+      && HARD_REGISTER_P (XEXP (x, 0))
+      && mode != TImode
+      && mode != TFmode
+      && aarch64_regno_ok_for_base_p (REGNO (XEXP (x, 0)), true))
+    {
+      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
+      HOST_WIDE_INT low = val & 0xfff;
+      HOST_WIDE_INT high = val - low;
+      HOST_WIDE_INT offs;
+      rtx cst;
+
+      /* Punt if MODE has no size (e.g. BLKmode): we cannot ascertain the
+        alignment, so leave the address to the generic reload handling.  */
+      if (GET_MODE_SIZE (mode) == 0)
+       return NULL_RTX;
+
+      offs = low % GET_MODE_SIZE (mode);
+
+      /* Align misaligned offset by adjusting high part to compensate.  */
+      if (offs != 0)
+       {
+         if (aarch64_uimm12_shift (high + offs))
+           {
+             /* Align down.  */
+             low = low - offs;
+             high = high + offs;
+           }
+         else
+           {
+             /* Align up.  */
+             offs = GET_MODE_SIZE (mode) - offs;
+             low = low + offs;
+             high = high + (low & 0x1000) - offs;
+             low &= 0xfff;
+           }
+       }
+
+      /* Check for overflow.  */
+      if (high + low != val)
+       return NULL_RTX;
+
+      cst = GEN_INT (high);
+      if (!aarch64_uimm12_shift (high))
+       cst = force_const_mem (Pmode, cst);
+
+      /* Reload high part into base reg, leaving the low part
+        in the mem instruction.  */
+      x = gen_rtx_PLUS (Pmode,
+                       gen_rtx_PLUS (Pmode, XEXP (x, 0), cst),
+                       GEN_INT (low));
+
+      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+                  BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
+                  opnum, (enum reload_type) type);
+      return x;
+    }
+
+  return NULL_RTX;
+}
+
+
+static reg_class_t
+aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
+                         reg_class_t rclass,
+                         enum machine_mode mode,
+                         secondary_reload_info *sri)
+{
+  /* Address expressions of the form PLUS (SP, large_offset) need two
+     scratch registers, one for the constant, and one for holding a
+     copy of SP, since SP cannot be used on the RHS of an add-reg
+     instruction.  */
+  if (mode == DImode
+      && GET_CODE (x) == PLUS
+      && XEXP (x, 0) == stack_pointer_rtx
+      && CONST_INT_P (XEXP (x, 1))
+      && !aarch64_uimm12_shift (INTVAL (XEXP (x, 1))))
+    {
+      sri->icode = CODE_FOR_reload_sp_immediate;
+      return NO_REGS;
+    }
+
+  /* Without the TARGET_SIMD instructions we cannot move a Q register
+     to a Q register directly.  We need a scratch.  */
+  if (REG_P (x) && (mode == TFmode || mode == TImode) && mode == GET_MODE (x)
+      && FP_REGNUM_P (REGNO (x)) && !TARGET_SIMD
+      && reg_class_subset_p (rclass, FP_REGS))
+    {
+      if (mode == TFmode)
+        sri->icode = CODE_FOR_aarch64_reload_movtf;
+      else if (mode == TImode)
+        sri->icode = CODE_FOR_aarch64_reload_movti;
+      return NO_REGS;
+    }
+
+  /* A TFmode or TImode memory access should be handled via an FP register
+     because AArch64 has richer addressing modes for LDR/STR instructions
+     than LDP/STP instructions.  */
+  if (!TARGET_GENERAL_REGS_ONLY && rclass == CORE_REGS
+      && GET_MODE_SIZE (mode) == 16 && MEM_P (x))
+    return FP_REGS;
+
+  if (rclass == FP_REGS && (mode == TImode || mode == TFmode) && CONSTANT_P(x))
+      return CORE_REGS;
+
+  return NO_REGS;
+}
+
+static bool
+aarch64_can_eliminate (const int from, const int to)
+{
+  /* If we need a frame pointer, we must eliminate FRAME_POINTER_REGNUM into
+     HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM.  */
+
+  if (frame_pointer_needed)
+    {
+      if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
+       return true;
+      if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+       return false;
+      if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
+         && !cfun->calls_alloca)
+       return true;
+      if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
+       return true;
+      return false;
+    }
+  else
+    {
+      /* If we decided that we didn't need a frame pointer but then used
+        LR in the function, then we do need a frame pointer after all, so
+        prevent this elimination to ensure a frame pointer is used.  */
+
+      if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
+         && df_regs_ever_live_p (LR_REGNUM))
+       return false;
+    }
+  return true;
+}
+
+HOST_WIDE_INT
+aarch64_initial_elimination_offset (unsigned from, unsigned to)
+{
+  HOST_WIDE_INT frame_size;
+  HOST_WIDE_INT offset;
+
+  aarch64_layout_frame ();
+  frame_size = (get_frame_size () + cfun->machine->frame.saved_regs_size
+               + crtl->outgoing_args_size
+               + cfun->machine->saved_varargs_size);
+
+  frame_size = AARCH64_ROUND_UP (frame_size, STACK_BOUNDARY / BITS_PER_UNIT);
+  offset = frame_size;
+
+  if (to == HARD_FRAME_POINTER_REGNUM)
+    {
+      if (from == ARG_POINTER_REGNUM)
+       return offset - crtl->outgoing_args_size;
+
+      if (from == FRAME_POINTER_REGNUM)
+       return cfun->machine->frame.saved_regs_size;
+    }
+
+  if (to == STACK_POINTER_REGNUM)
+    {
+      if (from == FRAME_POINTER_REGNUM)
+       {
+         HOST_WIDE_INT elim = crtl->outgoing_args_size
+                              + cfun->machine->frame.saved_regs_size
+                              - cfun->machine->frame.fp_lr_offset;
+         elim = AARCH64_ROUND_UP (elim, STACK_BOUNDARY / BITS_PER_UNIT);
+         return elim;
+       }
+    }
+
+  return offset;
+}
+
+
+/* Implement RETURN_ADDR_RTX.  We do not support moving back to a
+   previous frame.  */
+
+rtx
+aarch64_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
+{
+  if (count != 0)
+    return const0_rtx;
+  return get_hard_reg_initial_val (Pmode, LR_REGNUM);
+}
+
+
+static void
+aarch64_asm_trampoline_template (FILE *f)
+{
+  asm_fprintf (f, "\tldr\t%r, .+16\n", IP1_REGNUM);
+  asm_fprintf (f, "\tldr\t%r, .+20\n", STATIC_CHAIN_REGNUM);
+  asm_fprintf (f, "\tbr\t%r\n", IP1_REGNUM);
+  assemble_aligned_integer (4, const0_rtx);
+  assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
+  assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
+}
+
+unsigned
+aarch64_trampoline_size (void)
+{
+  return 32;  /* 3 insns + padding + 2 dwords.  */
+}
+
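+/* Fill in the 32-byte trampoline laid out by the template above:
+
+     offset  0: ldr  ip1, .+16            (loads the target address)
+     offset  4: ldr  <chain reg>, .+20    (loads the static chain value)
+     offset  8: br   ip1
+     offset 12: padding
+     offset 16: <target address>          (written below)
+     offset 24: <static chain value>      (written below)  */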
+static void
+aarch64_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
+{
+  rtx fnaddr, mem, a_tramp;
+
+  /* We don't need to copy the trailing D-words; we fill those in below.  */
+  emit_block_move (m_tramp, assemble_trampoline_template (),
+                  GEN_INT (TRAMPOLINE_SIZE - 16), BLOCK_OP_NORMAL);
+  mem = adjust_address (m_tramp, DImode, 16);
+  fnaddr = XEXP (DECL_RTL (fndecl), 0);
+  emit_move_insn (mem, fnaddr);
+
+  mem = adjust_address (m_tramp, DImode, 24);
+  emit_move_insn (mem, chain_value);
+
+  /* XXX We should really define a "clear_cache" pattern and use
+     gen_clear_cache().  */
+  a_tramp = XEXP (m_tramp, 0);
+  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__clear_cache"),
+                    LCT_NORMAL, VOIDmode, 2, a_tramp, Pmode,
+                    plus_constant (Pmode, a_tramp, TRAMPOLINE_SIZE), Pmode);
+}
+
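+/* Return the maximum number of hard registers of class REGCLASS needed
+   to hold a value of mode MODE: vector modes are counted in 16-byte
+   quad registers, everything else in 8-byte X registers.  */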
+static unsigned char
+aarch64_class_max_nregs (reg_class_t regclass, enum machine_mode mode)
+{
+  switch (regclass)
+    {
+    case CORE_REGS:
+    case POINTER_REGS:
+    case GENERAL_REGS:
+    case ALL_REGS:
+    case FP_REGS:
+    case FP_LO_REGS:
+      return
+       aarch64_vector_mode_p (mode) ? (GET_MODE_SIZE (mode) + 15) / 16 :
+                                      (GET_MODE_SIZE (mode) + 7) / 8;
+    case STACK_REG:
+      return 1;
+
+    case NO_REGS:
+      return 0;
+
+    default:
+      break;
+    }
+  gcc_unreachable ();
+}
+
+static reg_class_t
+aarch64_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t regclass)
+{
+  return ((regclass == POINTER_REGS || regclass == STACK_REG)
+         ? GENERAL_REGS : regclass);
+}
+
+void
+aarch64_asm_output_labelref (FILE* f, const char *name)
+{
+  asm_fprintf (f, "%U%s", name);
+}
+
+static void
+aarch64_elf_asm_constructor (rtx symbol, int priority)
+{
+  if (priority == DEFAULT_INIT_PRIORITY)
+    default_ctor_section_asm_out_constructor (symbol, priority);
+  else
+    {
+      section *s;
+      char buf[18];
+      snprintf (buf, sizeof (buf), ".init_array.%.5u", priority);
+      s = get_section (buf, SECTION_WRITE, NULL);
+      switch_to_section (s);
+      assemble_align (POINTER_SIZE);
+      fputs ("\t.dword\t", asm_out_file);
+      output_addr_const (asm_out_file, symbol);
+      fputc ('\n', asm_out_file);
+    }
+}
+
+static void
+aarch64_elf_asm_destructor (rtx symbol, int priority)
+{
+  if (priority == DEFAULT_INIT_PRIORITY)
+    default_dtor_section_asm_out_destructor (symbol, priority);
+  else
+    {
+      section *s;
+      char buf[18];
+      snprintf (buf, sizeof (buf), ".fini_array.%.5u", priority);
+      s = get_section (buf, SECTION_WRITE, NULL);
+      switch_to_section (s);
+      assemble_align (POINTER_SIZE);
+      fputs ("\t.dword\t", asm_out_file);
+      output_addr_const (asm_out_file, symbol);
+      fputc ('\n', asm_out_file);
+    }
+}
+
+const char*
+aarch64_output_casesi (rtx *operands)
+{
+  char buf[100];
+  char label[100];
+  rtx diff_vec = PATTERN (next_real_insn (operands[2]));
+  int index;
+  static const char *const patterns[4][2] =
+  {
+    {
+      "ldrb\t%w3, [%0,%w1,uxtw]",
+      "add\t%3, %4, %w3, sxtb #2"
+    },
+    {
+      "ldrh\t%w3, [%0,%w1,uxtw #1]",
+      "add\t%3, %4, %w3, sxth #2"
+    },
+    {
+      "ldr\t%w3, [%0,%w1,uxtw #2]",
+      "add\t%3, %4, %w3, sxtw #2"
+    },
+    /* We assume that DImode is only generated when not optimizing and
+       that we don't really need 64-bit address offsets.  That would
+       imply an object file with 8GB of code in a single function!  */
+    {
+      "ldr\t%w3, [%0,%w1,uxtw #2]",
+      "add\t%3, %4, %w3, sxtw #2"
+    }
+  };
+
+  gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
+
+  index = exact_log2 (GET_MODE_SIZE (GET_MODE (diff_vec)));
+
+  gcc_assert (index >= 0 && index <= 3);
+
+  /* Need to implement table size reduction by changing the code below.  */
+  output_asm_insn (patterns[index][0], operands);
+  ASM_GENERATE_INTERNAL_LABEL (label, "Lrtx", CODE_LABEL_NUMBER (operands[2]));
+  snprintf (buf, sizeof (buf),
+           "adr\t%%4, %s", targetm.strip_name_encoding (label));
+  output_asm_insn (buf, operands);
+  output_asm_insn (patterns[index][1], operands);
+  output_asm_insn ("br\t%3", operands);
+  assemble_label (asm_out_file, label);
+  return "";
+}
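+
+/* For example, the sequence emitted by aarch64_output_casesi for a HImode
+   dispatch table is roughly the following (register names illustrative):
+
+     ldrh   w3, [x0, w1, uxtw #1]
+     adr    x4, .Lrtx<N>
+     add    x3, x4, w3, sxth #2
+     br     x3
+   .Lrtx<N>:  */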
+
+
+/* Return size in bits of an arithmetic operand which is shifted/scaled and
+   masked such that it is suitable for a UXTB, UXTH, or UXTW extend
+   operator.  */
+
+int
+aarch64_uxt_size (int shift, HOST_WIDE_INT mask)
+{
+  if (shift >= 0 && shift <= 3)
+    {
+      int size;
+      for (size = 8; size <= 32; size *= 2)
+       {
+         HOST_WIDE_INT bits = ((HOST_WIDE_INT)1U << size) - 1;
+         if (mask == bits << shift)
+           return size;
+       }
+    }
+  return 0;
+}
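+
+/* Illustrative results for aarch64_uxt_size: (1, 0x1fe) -> 8, since
+   0x1fe == 0xff << 1 (a UXTB operand); (2, 0x3fffc) -> 16, since
+   0x3fffc == 0xffff << 2 (UXTH); and (4, anything) -> 0, because the
+   shift amount is outside the 0..3 range.  */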
+
+static bool
+aarch64_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
+                                  const_rtx x ATTRIBUTE_UNUSED)
+{
+  /* We can't use blocks for constants when we're using a per-function
+     constant pool.  */
+  return false;
+}
+
+static section *
+aarch64_select_rtx_section (enum machine_mode mode ATTRIBUTE_UNUSED,
+                           rtx x ATTRIBUTE_UNUSED,
+                           unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
+{
+  /* Force all constant pool entries into the current function section.  */
+  return function_section (current_function_decl);
+}
+
+
+/* Costs.  */
+
+/* Helper function for rtx cost calculation.  Strip a shift expression
+   from X.  Returns the inner operand if successful, or the original
+   expression on failure.  */
+static rtx
+aarch64_strip_shift (rtx x)
+{
+  rtx op = x;
+
+  if ((GET_CODE (op) == ASHIFT
+       || GET_CODE (op) == ASHIFTRT
+       || GET_CODE (op) == LSHIFTRT)
+      && CONST_INT_P (XEXP (op, 1)))
+    return XEXP (op, 0);
+
+  if (GET_CODE (op) == MULT
+      && CONST_INT_P (XEXP (op, 1))
+      && ((unsigned) exact_log2 (INTVAL (XEXP (op, 1)))) < 64)
+    return XEXP (op, 0);
+
+  return x;
+}
+
+/* Helper function for rtx cost calculation.  Strip a shift or extend
+   expression from X.  Returns the inner operand if successful, or the
+   original expression on failure.  We deal with a number of possible
+   canonicalization variations here.  */
+static rtx
+aarch64_strip_shift_or_extend (rtx x)
+{
+  rtx op = x;
+
+  /* Zero and sign extraction of a widened value.  */
+  if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
+      && XEXP (op, 2) == const0_rtx
+      && aarch64_is_extend_from_extract (GET_MODE (op), XEXP (XEXP (op, 0), 1),
+                                        XEXP (op, 1)))
+    return XEXP (XEXP (op, 0), 0);
+
+  /* It can also be represented (for zero-extend) as an AND with an
+     immediate.  */
+  if (GET_CODE (op) == AND
+      && GET_CODE (XEXP (op, 0)) == MULT
+      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
+      && CONST_INT_P (XEXP (op, 1))
+      && aarch64_uxt_size (exact_log2 (INTVAL (XEXP (XEXP (op, 0), 1))),
+                          INTVAL (XEXP (op, 1))) != 0)
+    return XEXP (XEXP (op, 0), 0);
+
+  /* Now handle extended register, as this may also have an optional
+     left shift by 1..4.  */
+  if (GET_CODE (op) == ASHIFT
+      && CONST_INT_P (XEXP (op, 1))
+      && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (op, 1))) <= 4)
+    op = XEXP (op, 0);
+
+  if (GET_CODE (op) == ZERO_EXTEND
+      || GET_CODE (op) == SIGN_EXTEND)
+    op = XEXP (op, 0);
+
+  if (op != x)
+    return op;
+
+  return aarch64_strip_shift (x);
+}
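+
+/* For example, aarch64_strip_shift_or_extend reduces forms such as
+   (ashift (reg X) (const_int 2)), (sign_extend (reg X)) or
+   (mult (reg X) (const_int 8)) to (reg X), so the cost code below only
+   charges for the base value of an operand whose shift or extension
+   comes for free in the instruction.  */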
+
+/* Calculate the cost of calculating X, storing it in *COST.  Result
+   is true if the total cost of the operation has now been calculated.  */
+static bool
+aarch64_rtx_costs (rtx x, int code, int outer ATTRIBUTE_UNUSED,
+                  int param ATTRIBUTE_UNUSED, int *cost, bool speed)
+{
+  rtx op0, op1;
+  const struct cpu_rtx_cost_table *extra_cost
+    = aarch64_tune_params->insn_extra_cost;
+
+  switch (code)
+    {
+    case SET:
+      op0 = SET_DEST (x);
+      op1 = SET_SRC (x);
+
+      switch (GET_CODE (op0))
+       {
+       case MEM:
+         if (speed)
+           *cost += extra_cost->memory_store;
+
+         if (op1 != const0_rtx)
+           *cost += rtx_cost (op1, SET, 1, speed);
+         return true;
+
+       case SUBREG:
+         if (! REG_P (SUBREG_REG (op0)))
+           *cost += rtx_cost (SUBREG_REG (op0), SET, 0, speed);
+         /* Fall through.  */
+       case REG:
+         /* Cost is just the cost of the RHS of the set.  */
+         *cost += rtx_cost (op1, SET, 1, true);
+         return true;
+
+       case ZERO_EXTRACT:  /* Bit-field insertion.  */
+       case SIGN_EXTRACT:
+         /* Strip any redundant widening of the RHS to meet the width of
+            the target.  */
+         if (GET_CODE (op1) == SUBREG)
+           op1 = SUBREG_REG (op1);
+         if ((GET_CODE (op1) == ZERO_EXTEND
+              || GET_CODE (op1) == SIGN_EXTEND)
+             && GET_CODE (XEXP (op0, 1)) == CONST_INT
+             && (GET_MODE_BITSIZE (GET_MODE (XEXP (op1, 0)))
+                 >= INTVAL (XEXP (op0, 1))))
+           op1 = XEXP (op1, 0);
+         *cost += rtx_cost (op1, SET, 1, speed);
+         return true;
+
+       default:
+         break;
+       }
+      return false;
+
+    case MEM:
+      if (speed)
+       *cost += extra_cost->memory_load;
+
+      return true;
+
+    case NEG:
+      op0 = CONST0_RTX (GET_MODE (x));
+      op1 = XEXP (x, 0);
+      goto cost_minus;
+
+    case COMPARE:
+      op0 = XEXP (x, 0);
+      op1 = XEXP (x, 1);
+
+      if (op1 == const0_rtx
+         && GET_CODE (op0) == AND)
+       {
+         x = op0;
+         goto cost_logic;
+       }
+
+      /* Comparisons can work if the order is swapped.
+        Canonicalization puts the more complex operation first, but
+        we want it in op1.  */
+      if (! (REG_P (op0)
+            || (GET_CODE (op0) == SUBREG && REG_P (SUBREG_REG (op0)))))
+       {
+         op0 = XEXP (x, 1);
+         op1 = XEXP (x, 0);
+       }
+      goto cost_minus;
+
+    case MINUS:
+      op0 = XEXP (x, 0);
+      op1 = XEXP (x, 1);
+
+    cost_minus:
+      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
+         || (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
+             && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT))
+       {
+         if (op0 != const0_rtx)
+           *cost += rtx_cost (op0, MINUS, 0, speed);
+
+         if (CONST_INT_P (op1))
+           {
+             if (!aarch64_uimm12_shift (INTVAL (op1)))
+               *cost += rtx_cost (op1, MINUS, 1, speed);
+           }
+         else
+           {
+             op1 = aarch64_strip_shift_or_extend (op1);
+             *cost += rtx_cost (op1, MINUS, 1, speed);
+           }
+         return true;
+       }
+
+      return false;
+
+    case PLUS:
+      op0 = XEXP (x, 0);
+      op1 = XEXP (x, 1);
+
+      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
+       {
+         if (CONST_INT_P (op1) && aarch64_uimm12_shift (INTVAL (op1)))
+           {
+             *cost += rtx_cost (op0, PLUS, 0, speed);
+           }
+         else
+           {
+             rtx new_op0 = aarch64_strip_shift_or_extend (op0);
+
+             if (new_op0 == op0
+                 && GET_CODE (op0) == MULT)
+               {
+                 if ((GET_CODE (XEXP (op0, 0)) == ZERO_EXTEND
+                      && GET_CODE (XEXP (op0, 1)) == ZERO_EXTEND)
+                     || (GET_CODE (XEXP (op0, 0)) == SIGN_EXTEND
+                         && GET_CODE (XEXP (op0, 1)) == SIGN_EXTEND))
+                   {
+                     *cost += (rtx_cost (XEXP (XEXP (op0, 0), 0), MULT, 0,
+                                         speed)
+                               + rtx_cost (XEXP (XEXP (op0, 1), 0), MULT, 1,
+                                           speed)
+                               + rtx_cost (op1, PLUS, 1, speed));
+                     if (speed)
+                       *cost += extra_cost->int_multiply_extend_add;
+                     return true;
+                   }
+                 *cost += (rtx_cost (XEXP (op0, 0), MULT, 0, speed)
+                           + rtx_cost (XEXP (op0, 1), MULT, 1, speed)
+                           + rtx_cost (op1, PLUS, 1, speed));
+
+                 if (speed)
+                   *cost += extra_cost->int_multiply_add;
+                 return true;
+               }
+
+             *cost += (rtx_cost (new_op0, PLUS, 0, speed)
+                       + rtx_cost (op1, PLUS, 1, speed));
+           }
+         return true;
+       }
+
+      return false;
+
+    case IOR:
+    case XOR:
+    case AND:
+    cost_logic:
+      op0 = XEXP (x, 0);
+      op1 = XEXP (x, 1);
+
+      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
+       {
+         if (CONST_INT_P (op1)
+             && aarch64_bitmask_imm (INTVAL (op1), GET_MODE (x)))
+           {
+             *cost += rtx_cost (op0, AND, 0, speed);
+           }
+         else
+           {
+             if (GET_CODE (op0) == NOT)
+               op0 = XEXP (op0, 0);
+             op0 = aarch64_strip_shift (op0);
+             *cost += (rtx_cost (op0, AND, 0, speed)
+                       + rtx_cost (op1, AND, 1, speed));
+           }
+         return true;
+       }
+      return false;
+
+    case ZERO_EXTEND:
+      if ((GET_MODE (x) == DImode
+          && GET_MODE (XEXP (x, 0)) == SImode)
+         || GET_CODE (XEXP (x, 0)) == MEM)
+       {
+         *cost += rtx_cost (XEXP (x, 0), ZERO_EXTEND, 0, speed);
+         return true;
+       }
+      return false;
+
+    case SIGN_EXTEND:
+      if (GET_CODE (XEXP (x, 0)) == MEM)
+       {
+         *cost += rtx_cost (XEXP (x, 0), SIGN_EXTEND, 0, speed);
+         return true;
+       }
+      return false;
+
+    case ROTATE:
+      if (!CONST_INT_P (XEXP (x, 1)))
+       *cost += COSTS_N_INSNS (2);
+      /* Fall through.  */
+    case ROTATERT:
+    case LSHIFTRT:
+    case ASHIFT:
+    case ASHIFTRT:
+
+      /* Shifting by a register often takes an extra cycle.  */
+      if (speed && !CONST_INT_P (XEXP (x, 1)))
+       *cost += extra_cost->register_shift;
+
+      *cost += rtx_cost (XEXP (x, 0), ASHIFT, 0, speed);
+      return true;
+
+    case HIGH:
+      if (!CONSTANT_P (XEXP (x, 0)))
+       *cost += rtx_cost (XEXP (x, 0), HIGH, 0, speed);
+      return true;
+
+    case LO_SUM:
+      if (!CONSTANT_P (XEXP (x, 1)))
+       *cost += rtx_cost (XEXP (x, 1), LO_SUM, 1, speed);
+      *cost += rtx_cost (XEXP (x, 0), LO_SUM, 0, speed);
+      return true;
+
+    case ZERO_EXTRACT:
+    case SIGN_EXTRACT:
+      *cost += rtx_cost (XEXP (x, 0), ZERO_EXTRACT, 0, speed);
+      return true;
+
+    case MULT:
+      op0 = XEXP (x, 0);
+      op1 = XEXP (x, 1);
+
+      *cost = COSTS_N_INSNS (1);
+      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
+       {
+         if (CONST_INT_P (op1)
+             && exact_log2 (INTVAL (op1)) > 0)
+           {
+             *cost += rtx_cost (op0, ASHIFT, 0, speed);
+             return true;
+           }
+
+         if ((GET_CODE (op0) == ZERO_EXTEND
+              && GET_CODE (op1) == ZERO_EXTEND)
+             || (GET_CODE (op0) == SIGN_EXTEND
+                 && GET_CODE (op1) == SIGN_EXTEND))
+           {
+             *cost += (rtx_cost (XEXP (op0, 0), MULT, 0, speed)
+                       + rtx_cost (XEXP (op1, 0), MULT, 1, speed));
+             if (speed)
+               *cost += extra_cost->int_multiply_extend;
+             return true;
+           }
+
+         if (speed)
+           *cost += extra_cost->int_multiply;
+       }
+      else if (speed)
+       {
+         if (GET_MODE (x) == DFmode)
+           *cost += extra_cost->double_multiply;
+         else if (GET_MODE (x) == SFmode)
+           *cost += extra_cost->float_multiply;
+       }
+
+      return false;  /* All arguments need to be in registers.  */
+
+    case MOD:
+    case UMOD:
+      *cost = COSTS_N_INSNS (2);
+      if (speed)
+       {
+         if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
+           *cost += (extra_cost->int_multiply_add
+                     + extra_cost->int_divide);
+         else if (GET_MODE (x) == DFmode)
+           *cost += (extra_cost->double_multiply
+                     + extra_cost->double_divide);
+         else if (GET_MODE (x) == SFmode)
+           *cost += (extra_cost->float_multiply
+                     + extra_cost->float_divide);
+       }
+      return false;  /* All arguments need to be in registers.  */
+
+    case DIV:
+    case UDIV:
+      *cost = COSTS_N_INSNS (1);
+      if (speed)
+       {
+         if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
+           *cost += extra_cost->int_divide;
+         else if (GET_MODE (x) == DFmode)
+           *cost += extra_cost->double_divide;
+         else if (GET_MODE (x) == SFmode)
+           *cost += extra_cost->float_divide;
+       }
+      return false;  /* All arguments need to be in registers.  */
+
+    default:
+      break;
+    }
+  return false;
+}
+
+static int
+aarch64_address_cost (rtx x,
+                     enum machine_mode mode ATTRIBUTE_UNUSED,
+                     addr_space_t as ATTRIBUTE_UNUSED,
+                     bool speed ATTRIBUTE_UNUSED)
+{
+  enum rtx_code c  = GET_CODE (x);
+  const struct cpu_addrcost_table *addr_cost = aarch64_tune_params->addr_cost;
+
+  if (c == PRE_INC || c == PRE_DEC || c == PRE_MODIFY)
+    return addr_cost->pre_modify;
+
+  if (c == POST_INC || c == POST_DEC || c == POST_MODIFY)
+    return addr_cost->post_modify;
+
+  if (c == PLUS)
+    {
+      if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+       return addr_cost->imm_offset;
+      else if (GET_CODE (XEXP (x, 0)) == MULT
+              || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
+              || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
+       return addr_cost->register_extend;
+
+      return addr_cost->register_offset;
+    }
+  else if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
+    return addr_cost->imm_offset;
+
+  return 0;
+}
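+
+/* Illustrative mapping of address forms onto the cost table used above
+   (assembly syntax given only as an example): [x0, #16] -> imm_offset,
+   [x0, x1] -> register_offset, [x0, w1, sxtw #2] -> register_extend,
+   and a post-increment such as [x0], #8 -> post_modify.  */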
+
+static int
+aarch64_register_move_cost (enum machine_mode mode,
+                           reg_class_t from, reg_class_t to)
+{
+  const struct cpu_regmove_cost *regmove_cost
+    = aarch64_tune_params->regmove_cost;
+
+  if (from == GENERAL_REGS && to == GENERAL_REGS)
+    return regmove_cost->GP2GP;
+  else if (from == GENERAL_REGS)
+    return regmove_cost->GP2FP;
+  else if (to == GENERAL_REGS)
+    return regmove_cost->FP2GP;
+
+  /* When AdvSIMD instructions are disabled it is not possible to move
+     a 128-bit value directly between Q registers.  This is handled in
+     secondary reload.  A general register is used as a scratch to move
+     the upper DI value and the lower DI value is moved directly,
+     hence the cost is the sum of three moves. */
+
+  if (! TARGET_SIMD && GET_MODE_SIZE (mode) == 16)
+    return regmove_cost->GP2FP + regmove_cost->FP2GP + regmove_cost->FP2FP;
+
+  return regmove_cost->FP2FP;
+}
+
+static int
+aarch64_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
+                         reg_class_t rclass ATTRIBUTE_UNUSED,
+                         bool in ATTRIBUTE_UNUSED)
+{
+  return aarch64_tune_params->memmov_cost;
+}
+
+static void initialize_aarch64_code_model (void);
+
+/* Parse the architecture extension string.  */
+
+static void
+aarch64_parse_extension (char *str)
+{
+  /* The extension string is parsed left to right.  */
+  const struct aarch64_option_extension *opt = NULL;
+
+  /* Flag to say whether we are adding or removing an extension.  */
+  int adding_ext = -1;
+
+  while (str != NULL && *str != 0)
+    {
+      char *ext;
+      size_t len;
+
+      str++;
+      ext = strchr (str, '+');
+
+      if (ext != NULL)
+       len = ext - str;
+      else
+       len = strlen (str);
+
+      if (len >= 2 && strncmp (str, "no", 2) == 0)
+       {
+         adding_ext = 0;
+         len -= 2;
+         str += 2;
+       }
+      else if (len > 0)
+       adding_ext = 1;
+
+      if (len == 0)
+       {
+         error ("missing feature modifier after %qs", "+no");
+         return;
+       }
+
+      /* Scan over the extensions table trying to find an exact match.  */
+      for (opt = all_extensions; opt->name != NULL; opt++)
+       {
+         if (strlen (opt->name) == len && strncmp (opt->name, str, len) == 0)
+           {
+             /* Add or remove the extension.  */
+             if (adding_ext)
+               aarch64_isa_flags |= opt->flags_on;
+             else
+               aarch64_isa_flags &= ~(opt->flags_off);
+             break;
+           }
+       }
+
+      if (opt->name == NULL)
+       {
+         /* Extension not found in list.  */
+         error ("unknown feature modifier %qs", str);
+         return;
+       }
+
+      str = ext;
+    }
+
+  return;
+}
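+
+/* As an example, parsing the extension string "+fp+nosimd" first ORs in
+   the flags turned on by "fp" and then clears the flags turned off by
+   "nosimd"; an unrecognized name such as "+bogus" is reported through
+   the "unknown feature modifier" error above.  */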
+
+/* Parse the ARCH string.  */
+
+static void
+aarch64_parse_arch (void)
+{
+  char *ext;
+  const struct processor *arch;
+  char *str = (char *) alloca (strlen (aarch64_arch_string) + 1);
+  size_t len;
+
+  strcpy (str, aarch64_arch_string);
+
+  ext = strchr (str, '+');
+
+  if (ext != NULL)
+    len = ext - str;
+  else
+    len = strlen (str);
+
+  if (len == 0)
+    {
+      error ("missing arch name in -march=%qs", str);
+      return;
+    }
+
+  /* Loop through the list of supported ARCHs to find a match.  */
+  for (arch = all_architectures; arch->name != NULL; arch++)
+    {
+      if (strlen (arch->name) == len && strncmp (arch->name, str, len) == 0)
+       {
+         selected_arch = arch;
+         aarch64_isa_flags = selected_arch->flags;
+         selected_cpu = &all_cores[selected_arch->core];
+
+         if (ext != NULL)
+           {
+             /* ARCH string contains at least one extension.  */
+             aarch64_parse_extension (ext);
+           }
+
+         return;
+       }
+    }
+
+  /* ARCH name not found in list.  */
+  error ("unknown value %qs for -march", str);
+  return;
+}
+
+/* Parse the CPU string.  */
+
+static void
+aarch64_parse_cpu (void)
+{
+  char *ext;
+  const struct processor *cpu;
+  char *str = (char *) alloca (strlen (aarch64_cpu_string) + 1);
+  size_t len;
+
+  strcpy (str, aarch64_cpu_string);
+
+  ext = strchr (str, '+');
+
+  if (ext != NULL)
+    len = ext - str;
+  else
+    len = strlen (str);
+
+  if (len == 0)
+    {
+      error ("missing cpu name in -mcpu=%qs", str);
+      return;
+    }
+
+  /* Loop through the list of supported CPUs to find a match.  */
+  for (cpu = all_cores; cpu->name != NULL; cpu++)
+    {
+      if (strlen (cpu->name) == len && strncmp (cpu->name, str, len) == 0)
+       {
+         selected_cpu = cpu;
+         aarch64_isa_flags = selected_cpu->flags;
+
+         if (ext != NULL)
+           {
+             /* CPU string contains at least one extension.  */
+             aarch64_parse_extension (ext);
+           }
+
+         return;
+       }
+    }
+
+  /* CPU name not found in list.  */
+  error ("unknown value %qs for -mcpu", str);
+  return;
+}
+
+/* Parse the TUNE string.  */
+
+static void
+aarch64_parse_tune (void)
+{
+  const struct processor *cpu;
+  char *str = (char *) alloca (strlen (aarch64_tune_string) + 1);
+  strcpy (str, aarch64_tune_string);
+
+  /* Loop through the list of supported CPUs to find a match.  */
+  for (cpu = all_cores; cpu->name != NULL; cpu++)
+    {
+      if (strcmp (cpu->name, str) == 0)
+       {
+         selected_tune = cpu;
+         return;
+       }
+    }
+
+  /* CPU name not found in list.  */
+  error ("unknown value %qs for -mtune", str);
+  return;
+}
+
+
+/* Implement TARGET_OPTION_OVERRIDE.  */
+
+static void
+aarch64_override_options (void)
+{
+  /* -march takes precedence over -mcpu: when -march is given, any -mcpu
+     setting is discarded and the CPU is derived from the architecture;
+     otherwise -march remains undefined.  -mtune can be used with either
+     -march or -mcpu.  */
+
+  if (aarch64_arch_string)
+    {
+      aarch64_parse_arch ();
+      aarch64_cpu_string = NULL;
+    }
+
+  if (aarch64_cpu_string)
+    {
+      aarch64_parse_cpu ();
+      selected_arch = NULL;
+    }
+
+  if (aarch64_tune_string)
+    {
+      aarch64_parse_tune ();
+    }
+
+  initialize_aarch64_code_model ();
+
+  aarch64_build_bitmask_table ();
+
+  /* This target defaults to strict volatile bitfields.  */
+  if (flag_strict_volatile_bitfields < 0 && abi_version_at_least (2))
+    flag_strict_volatile_bitfields = 1;
+
+  /* If the user did not specify a processor, choose the default
+     one for them.  This will be the CPU set during configuration using
+     --with-cpu, otherwise it is "generic".  */
+  if (!selected_cpu)
+    {
+      selected_cpu = &all_cores[TARGET_CPU_DEFAULT & 0x3f];
+      aarch64_isa_flags = TARGET_CPU_DEFAULT >> 6;
+    }
+
+  gcc_assert (selected_cpu);
+
+  /* The selected cpu may be an architecture, so look up tuning by core ID.  */
+  if (!selected_tune)
+    selected_tune = &all_cores[selected_cpu->core];
+
+  aarch64_tune_flags = selected_tune->flags;
+  aarch64_tune = selected_tune->core;
+  aarch64_tune_params = selected_tune->tune;
+
+  aarch64_override_options_after_change ();
+}
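+
+/* For example (hypothetical command line), given
+   "-march=armv8-a+nofp -mcpu=generic -mtune=generic" the -mcpu value is
+   discarded because -march wins, the ISA flags are those of armv8-a minus
+   whatever "nofp" switches off, and tuning is taken from the generic
+   core.  If none of the three options is given, the CPU chosen at
+   configure time with --with-cpu, or "generic", is used.  */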
+
+/* Implement targetm.override_options_after_change.  */
+
+static void
+aarch64_override_options_after_change (void)
+{
+  faked_omit_frame_pointer = false;
+
+  /* To omit leaf frame pointers, we need to turn flag_omit_frame_pointer on so
+     that aarch64_frame_pointer_required will be called.  We need to remember
+     whether flag_omit_frame_pointer was turned on normally or just faked.  */
+
+  if (flag_omit_leaf_frame_pointer && !flag_omit_frame_pointer)
+    {
+      flag_omit_frame_pointer = true;
+      faked_omit_frame_pointer = true;
+    }
+}
+
+static struct machine_function *
+aarch64_init_machine_status (void)
+{
+  struct machine_function *machine;
+  machine = ggc_alloc_cleared_machine_function ();
+  return machine;
+}
+
+void
+aarch64_init_expanders (void)
+{
+  init_machine_status = aarch64_init_machine_status;
+}
+
+/* Select the code model to use, adjusting for PIC and rejecting
+   unsupported combinations.  */
+static void
+initialize_aarch64_code_model (void)
+{
+   if (flag_pic)
+     {
+       switch (aarch64_cmodel_var)
+        {
+        case AARCH64_CMODEL_TINY:
+          aarch64_cmodel = AARCH64_CMODEL_TINY_PIC;
+          break;
+        case AARCH64_CMODEL_SMALL:
+          aarch64_cmodel = AARCH64_CMODEL_SMALL_PIC;
+          break;
+        case AARCH64_CMODEL_LARGE:
+          sorry ("code model %qs with -f%s", "large",
+                 flag_pic > 1 ? "PIC" : "pic");
+          break;
+        default:
+          gcc_unreachable ();
+        }
+     }
+   else
+     aarch64_cmodel = aarch64_cmodel_var;
+}
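+
+/* Illustrative outcomes: -mcmodel=small with -fpic selects
+   AARCH64_CMODEL_SMALL_PIC, -mcmodel=tiny with -fPIC selects
+   AARCH64_CMODEL_TINY_PIC, and -mcmodel=large with either PIC flag is
+   rejected via sorry (); without -fpic/-fPIC the user's choice is used
+   unchanged.  */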
+
+/* Return true if SYMBOL_REF X binds locally.  */
+
+static bool
+aarch64_symbol_binds_local_p (const_rtx x)
+{
+  return (SYMBOL_REF_DECL (x)
+         ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
+         : SYMBOL_REF_LOCAL_P (x));
+}
+
+/* Return true if SYMBOL_REF X is thread local.  */
+static bool
+aarch64_tls_symbol_p (rtx x)
+{
+  if (! TARGET_HAVE_TLS)
+    return false;
+
+  if (GET_CODE (x) != SYMBOL_REF)
+    return false;
+
+  return SYMBOL_REF_TLS_MODEL (x) != 0;
+}
+
+/* Classify a TLS symbol into one of the TLS kinds.  */
+enum aarch64_symbol_type
+aarch64_classify_tls_symbol (rtx x)
+{
+  enum tls_model tls_kind = tls_symbolic_operand_type (x);
+
+  switch (tls_kind)
+    {
+    case TLS_MODEL_GLOBAL_DYNAMIC:
+    case TLS_MODEL_LOCAL_DYNAMIC:
+      return TARGET_TLS_DESC ? SYMBOL_SMALL_TLSDESC : SYMBOL_SMALL_TLSGD;
+
+    case TLS_MODEL_INITIAL_EXEC:
+      return SYMBOL_SMALL_GOTTPREL;
+
+    case TLS_MODEL_LOCAL_EXEC:
+      return SYMBOL_SMALL_TPREL;
+
+    case TLS_MODEL_EMULATED:
+    case TLS_MODEL_NONE:
+      return SYMBOL_FORCE_TO_MEM;
+
+    default:
+      gcc_unreachable ();
+    }
+}
+
+/* Return the method that should be used to access SYMBOL_REF or
+   LABEL_REF X in context CONTEXT.  */
+enum aarch64_symbol_type
+aarch64_classify_symbol (rtx x,
+                        enum aarch64_symbol_context context ATTRIBUTE_UNUSED)
+{
+  if (GET_CODE (x) == LABEL_REF)
+    {
+      switch (aarch64_cmodel)
+       {
+       case AARCH64_CMODEL_LARGE:
+         return SYMBOL_FORCE_TO_MEM;
+
+       case AARCH64_CMODEL_TINY_PIC:
+       case AARCH64_CMODEL_TINY:
+       case AARCH64_CMODEL_SMALL_PIC:
+       case AARCH64_CMODEL_SMALL:
+         return SYMBOL_SMALL_ABSOLUTE;
+
+       default:
+         gcc_unreachable ();
+       }
+    }
+
+  gcc_assert (GET_CODE (x) == SYMBOL_REF);
+
+  switch (aarch64_cmodel)
+    {
+    case AARCH64_CMODEL_LARGE:
+      return SYMBOL_FORCE_TO_MEM;
+
+    case AARCH64_CMODEL_TINY:
+    case AARCH64_CMODEL_SMALL:
+
+      /* This is needed so that DFmode and TImode constants are loaded
+         from the constant pool.  We don't handle TImode constant loads
+         properly yet and hence still need to dump them into the
+         constant pool.  */
+      if (CONSTANT_POOL_ADDRESS_P (x))
+       return SYMBOL_FORCE_TO_MEM;
+
+      if (aarch64_tls_symbol_p (x))
+       return aarch64_classify_tls_symbol (x);
+
+      if (SYMBOL_REF_WEAK (x))
+       return SYMBOL_FORCE_TO_MEM;
+
+      return SYMBOL_SMALL_ABSOLUTE;
+
+    case AARCH64_CMODEL_TINY_PIC:
+    case AARCH64_CMODEL_SMALL_PIC:
+
+      if (CONSTANT_POOL_ADDRESS_P (x))
+       return SYMBOL_FORCE_TO_MEM;
+
+      if (aarch64_tls_symbol_p (x))
+       return aarch64_classify_tls_symbol (x);
+
+      if (!aarch64_symbol_binds_local_p (x))
+       return SYMBOL_SMALL_GOT;
+
+      return SYMBOL_SMALL_ABSOLUTE;
+
+    default:
+      gcc_unreachable ();
+    }
+  /* By default push everything into the constant pool.  */
+  return SYMBOL_FORCE_TO_MEM;
+}
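+
+/* For instance, under the small code model a weak symbol or a constant
+   pool address is classified as SYMBOL_FORCE_TO_MEM, a TLS symbol is
+   handled by aarch64_classify_tls_symbol, and under the small PIC model
+   a symbol that does not bind locally becomes SYMBOL_SMALL_GOT.  */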
+
+/* Return true if X is a symbolic constant that can be used in context
+   CONTEXT.  If it is, store the type of the symbol in *SYMBOL_TYPE.  */
+
+bool
+aarch64_symbolic_constant_p (rtx x, enum aarch64_symbol_context context,
+                            enum aarch64_symbol_type *symbol_type)
+{
+  rtx offset;
+  split_const (x, &x, &offset);
+  if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
+    *symbol_type = aarch64_classify_symbol (x, context);
+  else
+    return false;
+
+  /* No checking of offset at this point.  */
+  return true;
+}
+
+bool
+aarch64_constant_address_p (rtx x)
+{
+  return (CONSTANT_P (x) && memory_address_p (DImode, x));
+}
+
+bool
+aarch64_legitimate_pic_operand_p (rtx x)
+{
+  if (GET_CODE (x) == SYMBOL_REF
+      || (GET_CODE (x) == CONST
+         && GET_CODE (XEXP (x, 0)) == PLUS
+         && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
+     return false;
+
+  return true;
+}
+
+static bool
+aarch64_legitimate_constant_p (enum machine_mode mode, rtx x)
+{
+  /* Do not allow vector struct mode constants.  We could support
+     0 and -1 easily, but they need support in aarch64-simd.md.  */
+  if (TARGET_SIMD && aarch64_vect_struct_mode_p (mode))
+    return false;
+
+  /* This could probably go away because
+     we now decompose CONST_INTs according to expand_mov_immediate.  */
+  if ((GET_CODE (x) == CONST_VECTOR
+       && aarch64_simd_valid_immediate (x, mode, false,
+                                       NULL, NULL, NULL, NULL, NULL) != -1)
+      || CONST_INT_P (x))
+    return !targetm.cannot_force_const_mem (mode, x);
+
+  if (GET_CODE (x) == HIGH
+      && aarch64_valid_symref (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
+    return true;
+
+  return aarch64_constant_address_p (x);
+}
+
+static void
+aarch64_init_builtins (void)
+{
+  tree ftype, decl = NULL;
+
+  ftype = build_function_type (ptr_type_node, void_list_node);
+  decl = add_builtin_function ("__builtin_thread_pointer", ftype,
+                              AARCH64_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
+                              NULL, NULL_TREE);
+  TREE_NOTHROW (decl) = 1;
+  TREE_READONLY (decl) = 1;
+
+  if (TARGET_SIMD)
+    init_aarch64_simd_builtins ();
+}
+
+static rtx
+aarch64_load_tp (rtx target)
+{
+  if (!target
+      || GET_MODE (target) != Pmode
+      || !register_operand (target, Pmode))
+    target = gen_reg_rtx (Pmode);
+
+  /* Can return in any reg.  */
+  emit_insn (gen_aarch64_load_tp_hard (target));
+  return target;
+}
+
+/* Expand an expression EXP that calls a built-in function,
+   with result going to TARGET if that's convenient.  */
+static rtx
+aarch64_expand_builtin (tree exp,
+                    rtx target,
+                    rtx subtarget ATTRIBUTE_UNUSED,
+                    enum machine_mode mode ATTRIBUTE_UNUSED,
+                    int ignore ATTRIBUTE_UNUSED)
+{
+  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+  int fcode = DECL_FUNCTION_CODE (fndecl);
+
+  if (fcode == AARCH64_BUILTIN_THREAD_POINTER)
+    return aarch64_load_tp (target);
+
+  if (fcode >= AARCH64_SIMD_BUILTIN_BASE)
+    return aarch64_simd_expand_builtin (fcode, exp, target);
+
+  return NULL_RTX;
+}
+
+/* On AAPCS systems, this is the "struct __va_list".  */
+static GTY(()) tree va_list_type;
+
+/* Implement TARGET_BUILD_BUILTIN_VA_LIST.
+   Return the type to use as __builtin_va_list.
+
+   AAPCS64 \S 7.1.4 requires that va_list be a typedef for a type defined as:
+
+   struct __va_list
+   {
+     void *__stack;
+     void *__gr_top;
+     void *__vr_top;
+     int   __gr_offs;
+     int   __vr_offs;
+   };  */
+
+static tree
+aarch64_build_builtin_va_list (void)
+{
+  tree va_list_name;
+  tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
+
+  /* Create the type.  */
+  va_list_type = lang_hooks.types.make_type (RECORD_TYPE);
+  /* Give it the required name.  */
+  va_list_name = build_decl (BUILTINS_LOCATION,
+                            TYPE_DECL,
+                            get_identifier ("__va_list"),
+                            va_list_type);
+  DECL_ARTIFICIAL (va_list_name) = 1;
+  TYPE_NAME (va_list_type) = va_list_name;
+
+  /* Create the fields.  */
+  f_stack = build_decl (BUILTINS_LOCATION,
+                       FIELD_DECL, get_identifier ("__stack"),
+                       ptr_type_node);
+  f_grtop = build_decl (BUILTINS_LOCATION,
+                       FIELD_DECL, get_identifier ("__gr_top"),
+                       ptr_type_node);
+  f_vrtop = build_decl (BUILTINS_LOCATION,
+                       FIELD_DECL, get_identifier ("__vr_top"),
+                       ptr_type_node);
+  f_groff = build_decl (BUILTINS_LOCATION,
+                       FIELD_DECL, get_identifier ("__gr_offs"),
+                       integer_type_node);
+  f_vroff = build_decl (BUILTINS_LOCATION,
+                       FIELD_DECL, get_identifier ("__vr_offs"),
+                       integer_type_node);
+
+  DECL_ARTIFICIAL (f_stack) = 1;
+  DECL_ARTIFICIAL (f_grtop) = 1;
+  DECL_ARTIFICIAL (f_vrtop) = 1;
+  DECL_ARTIFICIAL (f_groff) = 1;
+  DECL_ARTIFICIAL (f_vroff) = 1;
+
+  DECL_FIELD_CONTEXT (f_stack) = va_list_type;
+  DECL_FIELD_CONTEXT (f_grtop) = va_list_type;
+  DECL_FIELD_CONTEXT (f_vrtop) = va_list_type;
+  DECL_FIELD_CONTEXT (f_groff) = va_list_type;
+  DECL_FIELD_CONTEXT (f_vroff) = va_list_type;
+
+  TYPE_FIELDS (va_list_type) = f_stack;
+  DECL_CHAIN (f_stack) = f_grtop;
+  DECL_CHAIN (f_grtop) = f_vrtop;
+  DECL_CHAIN (f_vrtop) = f_groff;
+  DECL_CHAIN (f_groff) = f_vroff;
+
+  /* Compute its layout.  */
+  layout_type (va_list_type);
+
+  return va_list_type;
+}
+
+/* Implement TARGET_EXPAND_BUILTIN_VA_START.  */
+static void
+aarch64_expand_builtin_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
+{
+  const CUMULATIVE_ARGS *cum;
+  tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
+  tree stack, grtop, vrtop, groff, vroff;
+  tree t;
+  int gr_save_area_size;
+  int vr_save_area_size;
+  int vr_offset;
+
+  cum = &crtl->args.info;
+  gr_save_area_size
+    = (NUM_ARG_REGS - cum->aapcs_ncrn) * UNITS_PER_WORD;
+  vr_save_area_size
+    = (NUM_FP_ARG_REGS - cum->aapcs_nvrn) * UNITS_PER_VREG;
+
+  if (TARGET_GENERAL_REGS_ONLY)
+    {
+      if (cum->aapcs_nvrn > 0)
+       sorry ("%qs and floating point or vector arguments",
+              "-mgeneral-regs-only");
+      vr_save_area_size = 0;
+    }
+
+  f_stack = TYPE_FIELDS (va_list_type_node);
+  f_grtop = DECL_CHAIN (f_stack);
+  f_vrtop = DECL_CHAIN (f_grtop);
+  f_groff = DECL_CHAIN (f_vrtop);
+  f_vroff = DECL_CHAIN (f_groff);
+
+  stack = build3 (COMPONENT_REF, TREE_TYPE (f_stack), valist, f_stack,
+                 NULL_TREE);
+  grtop = build3 (COMPONENT_REF, TREE_TYPE (f_grtop), valist, f_grtop,
+                 NULL_TREE);
+  vrtop = build3 (COMPONENT_REF, TREE_TYPE (f_vrtop), valist, f_vrtop,
+                 NULL_TREE);
+  groff = build3 (COMPONENT_REF, TREE_TYPE (f_groff), valist, f_groff,
+                 NULL_TREE);
+  vroff = build3 (COMPONENT_REF, TREE_TYPE (f_vroff), valist, f_vroff,
+                 NULL_TREE);
+
+  /* Emit code to initialize STACK, which points to the next varargs stack
+     argument.  CUM->AAPCS_STACK_SIZE gives the number of stack words used
+     by named arguments.  STACK is 8-byte aligned.  */
+  t = make_tree (TREE_TYPE (stack), virtual_incoming_args_rtx);
+  if (cum->aapcs_stack_size > 0)
+    t = fold_build_pointer_plus_hwi (t, cum->aapcs_stack_size * UNITS_PER_WORD);
+  t = build2 (MODIFY_EXPR, TREE_TYPE (stack), stack, t);
+  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+  /* Emit code to initialize GRTOP, the top of the GR save area.
+     virtual_incoming_args_rtx should have been 16 byte aligned.  */
+  t = make_tree (TREE_TYPE (grtop), virtual_incoming_args_rtx);
+  t = build2 (MODIFY_EXPR, TREE_TYPE (grtop), grtop, t);
+  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+  /* Emit code to initialize VRTOP, the top of the VR save area.
+     This address is gr_save_area_bytes below GRTOP, rounded
+     down to the next 16-byte boundary.  */
+  t = make_tree (TREE_TYPE (vrtop), virtual_incoming_args_rtx);
+  vr_offset = AARCH64_ROUND_UP (gr_save_area_size,
+                            STACK_BOUNDARY / BITS_PER_UNIT);
+
+  if (vr_offset)
+    t = fold_build_pointer_plus_hwi (t, -vr_offset);
+  t = build2 (MODIFY_EXPR, TREE_TYPE (vrtop), vrtop, t);
+  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+  /* Emit code to initialize GROFF, the offset from GRTOP of the
+     next GPR argument.  */
+  t = build2 (MODIFY_EXPR, TREE_TYPE (groff), groff,
+             build_int_cst (TREE_TYPE (groff), -gr_save_area_size));
+  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+  /* Likewise emit code to initialize VROFF, the offset from VRTOP
+     of the next VR argument.  */
+  t = build2 (MODIFY_EXPR, TREE_TYPE (vroff), vroff,
+             build_int_cst (TREE_TYPE (vroff), -vr_save_area_size));
+  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+}
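+
+/* Worked example (illustrative): in a variadic function whose named
+   arguments consume two of the eight GP argument registers and one of
+   the eight FP/SIMD argument registers, va_start sets
+   __gr_offs = -(8 - 2) * 8 = -48 and __vr_offs = -(8 - 1) * 16 = -112,
+   with __gr_top at virtual_incoming_args_rtx and __vr_top 48 bytes
+   (the 16-byte-aligned GR save area size) below it.  */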
+
+/* Implement TARGET_GIMPLIFY_VA_ARG_EXPR.  */
+
+static tree
+aarch64_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
+                             gimple_seq *post_p ATTRIBUTE_UNUSED)
+{
+  tree addr;
+  bool indirect_p;
+  bool is_ha;          /* is HFA or HVA.  */
+  bool dw_align;       /* double-word align.  */
+  enum machine_mode ag_mode = VOIDmode;
+  int nregs;
+  enum machine_mode mode;
+
+  tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
+  tree stack, f_top, f_off, off, arg, roundup, on_stack;
+  HOST_WIDE_INT size, rsize, adjust, align;
+  tree t, u, cond1, cond2;
+
+  indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
+  if (indirect_p)
+    type = build_pointer_type (type);
+
+  mode = TYPE_MODE (type);
+
+  f_stack = TYPE_FIELDS (va_list_type_node);
+  f_grtop = DECL_CHAIN (f_stack);
+  f_vrtop = DECL_CHAIN (f_grtop);
+  f_groff = DECL_CHAIN (f_vrtop);
+  f_vroff = DECL_CHAIN (f_groff);
+
+  stack = build3 (COMPONENT_REF, TREE_TYPE (f_stack), unshare_expr (valist),
+                 f_stack, NULL_TREE);
+  size = int_size_in_bytes (type);
+  align = aarch64_function_arg_alignment (mode, type) / BITS_PER_UNIT;
+
+  dw_align = false;
+  adjust = 0;
+  if (aarch64_vfp_is_call_or_return_candidate (mode,
+                                              type,
+                                              &ag_mode,
+                                              &nregs,
+                                              &is_ha))
+    {
+      /* TYPE passed in fp/simd registers.  */
+      if (TARGET_GENERAL_REGS_ONLY)
+       sorry ("%qs and floating point or vector arguments",
+              "-mgeneral-regs-only");
+
+      f_top = build3 (COMPONENT_REF, TREE_TYPE (f_vrtop),
+                     unshare_expr (valist), f_vrtop, NULL_TREE);
+      f_off = build3 (COMPONENT_REF, TREE_TYPE (f_vroff),
+                     unshare_expr (valist), f_vroff, NULL_TREE);
+
+      rsize = nregs * UNITS_PER_VREG;
+
+      if (is_ha)
+       {
+         if (BYTES_BIG_ENDIAN && GET_MODE_SIZE (ag_mode) < UNITS_PER_VREG)
+           adjust = UNITS_PER_VREG - GET_MODE_SIZE (ag_mode);
+       }
+      else if (BLOCK_REG_PADDING (mode, type, 1) == downward
+              && size < UNITS_PER_VREG)
+       {
+         adjust = UNITS_PER_VREG - size;
+       }
+    }
+  else
+    {
+      /* TYPE passed in general registers.  */
+      f_top = build3 (COMPONENT_REF, TREE_TYPE (f_grtop),
+                     unshare_expr (valist), f_grtop, NULL_TREE);
+      f_off = build3 (COMPONENT_REF, TREE_TYPE (f_groff),
+                     unshare_expr (valist), f_groff, NULL_TREE);
+      rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
+      nregs = rsize / UNITS_PER_WORD;
+
+      if (align > 8)
+       dw_align = true;
+
+      if (BLOCK_REG_PADDING (mode, type, 1) == downward
+         && size < UNITS_PER_WORD)
+       {
+         adjust = UNITS_PER_WORD  - size;
+       }
+    }
+
+  /* Get a local temporary for the field value.  */
+  off = get_initialized_tmp_var (f_off, pre_p, NULL);
+
+  /* Emit code to branch if off >= 0.  */
+  t = build2 (GE_EXPR, boolean_type_node, off,
+             build_int_cst (TREE_TYPE (off), 0));
+  cond1 = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
+
+  if (dw_align)
+    {
+      /* Emit: offs = (offs + 15) & -16.  */
+      t = build2 (PLUS_EXPR, TREE_TYPE (off), off,
+                 build_int_cst (TREE_TYPE (off), 15));
+      t = build2 (BIT_AND_EXPR, TREE_TYPE (off), t,
+                 build_int_cst (TREE_TYPE (off), -16));
+      roundup = build2 (MODIFY_EXPR, TREE_TYPE (off), off, t);
+    }
+  else
+    roundup = NULL;
+
+  /* Update ap.__[g|v]r_offs  */
+  t = build2 (PLUS_EXPR, TREE_TYPE (off), off,
+             build_int_cst (TREE_TYPE (off), rsize));
+  t = build2 (MODIFY_EXPR, TREE_TYPE (f_off), unshare_expr (f_off), t);
+
+  /* String up.  */
+  if (roundup)
+    t = build2 (COMPOUND_EXPR, TREE_TYPE (t), roundup, t);
+
+  /* [cond2] if (ap.__[g|v]r_offs > 0)  */
+  u = build2 (GT_EXPR, boolean_type_node, unshare_expr (f_off),
+             build_int_cst (TREE_TYPE (f_off), 0));
+  cond2 = build3 (COND_EXPR, ptr_type_node, u, NULL_TREE, NULL_TREE);
+
+  /* String up: make sure the assignment happens before the use.  */
+  t = build2 (COMPOUND_EXPR, TREE_TYPE (cond2), t, cond2);
+  COND_EXPR_ELSE (cond1) = t;
+
+  /* Prepare the trees handling the argument that is passed on the stack;
+     the top-level node is stored in ON_STACK.  */
+  arg = get_initialized_tmp_var (stack, pre_p, NULL);
+  if (align > 8)
+    {
+      /* if (alignof(type) > 8) (arg = arg + 15) & -16;  */
+      t = fold_convert (intDI_type_node, arg);
+      t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
+                 build_int_cst (TREE_TYPE (t), 15));
+      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
+                 build_int_cst (TREE_TYPE (t), -16));
+      t = fold_convert (TREE_TYPE (arg), t);
+      roundup = build2 (MODIFY_EXPR, TREE_TYPE (arg), arg, t);
+    }
+  else
+    roundup = NULL;
+  /* Advance ap.__stack  */
+  t = fold_convert (intDI_type_node, arg);
+  t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
+             build_int_cst (TREE_TYPE (t), size + 7));
+  t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
+             build_int_cst (TREE_TYPE (t), -8));
+  t = fold_convert (TREE_TYPE (arg), t);
+  t = build2 (MODIFY_EXPR, TREE_TYPE (stack), unshare_expr (stack), t);
+  /* String up roundup and advance.  */
+  if (roundup)
+    t = build2 (COMPOUND_EXPR, TREE_TYPE (t), roundup, t);
+  /* String up with arg.  */
+  on_stack = build2 (COMPOUND_EXPR, TREE_TYPE (arg), t, arg);
+  /* Big-endianness related address adjustment.  */
+  if (BLOCK_REG_PADDING (mode, type, 1) == downward
+      && size < UNITS_PER_WORD)
+  {
+    t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (arg), arg,
+               size_int (UNITS_PER_WORD - size));
+    on_stack = build2 (COMPOUND_EXPR, TREE_TYPE (arg), on_stack, t);
+  }
+
+  COND_EXPR_THEN (cond1) = unshare_expr (on_stack);
+  COND_EXPR_THEN (cond2) = unshare_expr (on_stack);
+
+  /* Adjustment to OFFSET in the case of BIG_ENDIAN.  */
+  t = off;
+  if (adjust)
+    t = build2 (PREINCREMENT_EXPR, TREE_TYPE (off), off,
+               build_int_cst (TREE_TYPE (off), adjust));
+
+  t = fold_convert (sizetype, t);
+  t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (f_top), f_top, t);
+
+  if (is_ha)
+    {
+      /* type ha; // treat as "struct {ftype field[n];}"
+         ... [computing offs]
+         for (i = 0; i <nregs; ++i, offs += 16)
+          ha.field[i] = *((ftype *)(ap.__vr_top + offs));
+        return ha;  */
+      int i;
+      tree tmp_ha, field_t, field_ptr_t;
+
+      /* Declare a local variable.  */
+      tmp_ha = create_tmp_var_raw (type, "ha");
+      gimple_add_tmp_var (tmp_ha);
+
+      /* Establish the base type.  */
+      switch (ag_mode)
+       {
+       case SFmode:
+         field_t = float_type_node;
+         field_ptr_t = float_ptr_type_node;
+         break;
+       case DFmode:
+         field_t = double_type_node;
+         field_ptr_t = double_ptr_type_node;
+         break;
+       case TFmode:
+         field_t = long_double_type_node;
+         field_ptr_t = long_double_ptr_type_node;
+         break;
+/* The half precision and quad precision are not fully supported yet.  Enable
+   the following code after the support is complete.  Need to find the correct
+   type node for __fp16 *.  */
+#if 0
+       case HFmode:
+         field_t = float_type_node;
+         field_ptr_t = float_ptr_type_node;
+         break;
+#endif
+       case V2SImode:
+       case V4SImode:
+           {
+             tree innertype = make_signed_type (GET_MODE_PRECISION (SImode));
+             field_t = build_vector_type_for_mode (innertype, ag_mode);
+             field_ptr_t = build_pointer_type (field_t);
+           }
+         break;
+       default:
+         gcc_assert (0);
+       }
+
+      /* *(field_ptr_t)&ha = *((field_ptr_t)vr_saved_area);  */
+      tmp_ha = build1 (ADDR_EXPR, field_ptr_t, tmp_ha);
+      addr = t;
+      t = fold_convert (field_ptr_t, addr);
+      t = build2 (MODIFY_EXPR, field_t,
+                 build1 (INDIRECT_REF, field_t, tmp_ha),
+                 build1 (INDIRECT_REF, field_t, t));
+
+      /* ha.field[i] = *((field_ptr_t)vr_saved_area + i)  */
+      for (i = 1; i < nregs; ++i)
+       {
+         addr = fold_build_pointer_plus_hwi (addr, UNITS_PER_VREG);
+         u = fold_convert (field_ptr_t, addr);
+         u = build2 (MODIFY_EXPR, field_t,
+                     build2 (MEM_REF, field_t, tmp_ha,
+                             build_int_cst (field_ptr_t,
+                                            (i *
+                                             int_size_in_bytes (field_t)))),
+                     build1 (INDIRECT_REF, field_t, u));
+         t = build2 (COMPOUND_EXPR, TREE_TYPE (t), t, u);
+       }
+
+      u = fold_convert (TREE_TYPE (f_top), tmp_ha);
+      t = build2 (COMPOUND_EXPR, TREE_TYPE (f_top), t, u);
+    }
+
+  COND_EXPR_ELSE (cond2) = t;
+  addr = fold_convert (build_pointer_type (type), cond1);
+  addr = build_va_arg_indirect_ref (addr);
+
+  if (indirect_p)
+    addr = build_va_arg_indirect_ref (addr);
+
+  return addr;
+}
+
+/* Implement TARGET_SETUP_INCOMING_VARARGS.  */
+
+static void
+aarch64_setup_incoming_varargs (cumulative_args_t cum_v, enum machine_mode mode,
+                               tree type, int *pretend_size ATTRIBUTE_UNUSED,
+                               int no_rtl)
+{
+  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+  CUMULATIVE_ARGS local_cum;
+  int gr_saved, vr_saved;
+
+  /* The caller has advanced CUM up to, but not beyond, the last named
+     argument.  Advance a local copy of CUM past the last "real" named
+     argument, to find out how many registers are left over.  */
+  local_cum = *cum;
+  aarch64_function_arg_advance (pack_cumulative_args (&local_cum),
+                               mode, type, true);
+
+  /* Find out how many registers we need to save.  */
+  gr_saved = NUM_ARG_REGS - local_cum.aapcs_ncrn;
+  vr_saved = NUM_FP_ARG_REGS - local_cum.aapcs_nvrn;
+
+  if (TARGET_GENERAL_REGS_ONLY)
+    {
+      if (local_cum.aapcs_nvrn > 0)
+       sorry ("%qs and floating point or vector arguments",
+              "-mgeneral-regs-only");
+      vr_saved = 0;
+    }
+
+  if (!no_rtl)
+    {
+      if (gr_saved > 0)
+       {
+         rtx ptr, mem;
+
+         /* virtual_incoming_args_rtx should have been 16-byte aligned.  */
+         ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
+                              - gr_saved * UNITS_PER_WORD);
+         mem = gen_frame_mem (BLKmode, ptr);
+         set_mem_alias_set (mem, get_varargs_alias_set ());
+
+         move_block_from_reg (local_cum.aapcs_ncrn + R0_REGNUM,
+                              mem, gr_saved);
+       }
+      if (vr_saved > 0)
+       {
+         /* We can't use move_block_from_reg, because it will use
+            the wrong mode, storing D regs only.  */
+         enum machine_mode mode = TImode;
+         int off, i;
+
+         /* Set OFF to the offset from virtual_incoming_args_rtx of
+            the first vector register.  The VR save area lies below
+            the GR one, and is aligned to 16 bytes.  */
+         off = -AARCH64_ROUND_UP (gr_saved * UNITS_PER_WORD,
+                                  STACK_BOUNDARY / BITS_PER_UNIT);
+         off -= vr_saved * UNITS_PER_VREG;
+
+         for (i = local_cum.aapcs_nvrn; i < NUM_FP_ARG_REGS; ++i)
+           {
+             rtx ptr, mem;
+
+             ptr = plus_constant (Pmode, virtual_incoming_args_rtx, off);
+             mem = gen_frame_mem (mode, ptr);
+             set_mem_alias_set (mem, get_varargs_alias_set ());
+             aarch64_emit_move (mem, gen_rtx_REG (mode, V0_REGNUM + i));
+             off += UNITS_PER_VREG;
+           }
+       }
+    }
+
+  /* We don't save the size into *PRETEND_SIZE because we want to avoid
+     any complication of having crtl->args.pretend_args_size changed.  */
+  cfun->machine->saved_varargs_size
+    = (AARCH64_ROUND_UP (gr_saved * UNITS_PER_WORD,
+                     STACK_BOUNDARY / BITS_PER_UNIT)
+       + vr_saved * UNITS_PER_VREG);
+}
+
+static void
+aarch64_conditional_register_usage (void)
+{
+  int i;
+  if (!TARGET_FLOAT)
+    {
+      for (i = V0_REGNUM; i <= V31_REGNUM; i++)
+       {
+         fixed_regs[i] = 1;
+         call_used_regs[i] = 1;
+       }
+    }
+}
+
+/* Walk down the type tree of TYPE counting consecutive base elements.
+   If *MODEP is VOIDmode, then set it to the first valid floating point
+   type.  If a non-floating point type is found, or if a floating point
+   type that doesn't match a non-VOIDmode *MODEP is found, then return -1,
+   otherwise return the count in the sub-tree.  */
+static int
+aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
+{
+  enum machine_mode mode;
+  HOST_WIDE_INT size;
+
+  switch (TREE_CODE (type))
+    {
+    case REAL_TYPE:
+      mode = TYPE_MODE (type);
+      if (mode != DFmode && mode != SFmode && mode != TFmode)
+       return -1;
+
+      if (*modep == VOIDmode)
+       *modep = mode;
+
+      if (*modep == mode)
+       return 1;
+
+      break;
+
+    case COMPLEX_TYPE:
+      mode = TYPE_MODE (TREE_TYPE (type));
+      if (mode != DFmode && mode != SFmode && mode != TFmode)
+       return -1;
+
+      if (*modep == VOIDmode)
+       *modep = mode;
+
+      if (*modep == mode)
+       return 2;
+
+      break;
+
+    case VECTOR_TYPE:
+      /* Use V2SImode and V4SImode as representatives of all 64-bit
+        and 128-bit vector types.  */
+      size = int_size_in_bytes (type);
+      switch (size)
+       {
+       case 8:
+         mode = V2SImode;
+         break;
+       case 16:
+         mode = V4SImode;
+         break;
+       default:
+         return -1;
+       }
+
+      if (*modep == VOIDmode)
+       *modep = mode;
+
+      /* Vector modes are considered to be opaque: two vectors are
+        equivalent for the purposes of being homogeneous aggregates
+        if they are the same size.  */
+      if (*modep == mode)
+       return 1;
+
+      break;
+
+    case ARRAY_TYPE:
+      {
+       int count;
+       tree index = TYPE_DOMAIN (type);
+
+       /* Can't handle incomplete types.  */
+       if (!COMPLETE_TYPE_P (type))
+         return -1;
+
+       count = aapcs_vfp_sub_candidate (TREE_TYPE (type), modep);
+       if (count == -1
+           || !index
+           || !TYPE_MAX_VALUE (index)
+           || !host_integerp (TYPE_MAX_VALUE (index), 1)
+           || !TYPE_MIN_VALUE (index)
+           || !host_integerp (TYPE_MIN_VALUE (index), 1)
+           || count < 0)
+         return -1;
+
+       count *= (1 + tree_low_cst (TYPE_MAX_VALUE (index), 1)
+                     - tree_low_cst (TYPE_MIN_VALUE (index), 1));
+
+       /* There must be no padding.  */
+       if (!host_integerp (TYPE_SIZE (type), 1)
+           || (tree_low_cst (TYPE_SIZE (type), 1)
+               != count * GET_MODE_BITSIZE (*modep)))
+         return -1;
+
+       return count;
+      }
+
+    case RECORD_TYPE:
+      {
+       int count = 0;
+       int sub_count;
+       tree field;
+
+       /* Can't handle incomplete types.  */
+       if (!COMPLETE_TYPE_P (type))
+         return -1;
+
+       for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+         {
+           if (TREE_CODE (field) != FIELD_DECL)
+             continue;
+
+           sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep);
+           if (sub_count < 0)
+             return -1;
+           count += sub_count;
+         }
+
+       /* There must be no padding.  */
+       if (!host_integerp (TYPE_SIZE (type), 1)
+           || (tree_low_cst (TYPE_SIZE (type), 1)
+               != count * GET_MODE_BITSIZE (*modep)))
+         return -1;
+
+       return count;
+      }
+
+    case UNION_TYPE:
+    case QUAL_UNION_TYPE:
+      {
+       /* These aren't very interesting except in a degenerate case.  */
+       int count = 0;
+       int sub_count;
+       tree field;
+
+       /* Can't handle incomplete types.  */
+       if (!COMPLETE_TYPE_P (type))
+         return -1;
+
+       for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+         {
+           if (TREE_CODE (field) != FIELD_DECL)
+             continue;
+
+           sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep);
+           if (sub_count < 0)
+             return -1;
+           count = count > sub_count ? count : sub_count;
+         }
+
+       /* There must be no padding.  */
+       if (!host_integerp (TYPE_SIZE (type), 1)
+           || (tree_low_cst (TYPE_SIZE (type), 1)
+               != count * GET_MODE_BITSIZE (*modep)))
+         return -1;
+
+       return count;
+      }
+
+    default:
+      break;
+    }
+
+  return -1;
+}
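+
+/* Illustrative results for aapcs_vfp_sub_candidate: for
+   "struct { float x, y, z; }" it returns 3 with *MODEP set to SFmode,
+   for "_Complex double" it returns 2 with *MODEP set to DFmode, and for
+   "struct { float f; double d; }" it returns -1 because the base modes
+   of the fields differ.  */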
+
+/* Return TRUE if the type, as described by TYPE and MODE, is a composite
+   type as described in AAPCS64 \S 4.3.  This includes aggregate, union and
+   array types.  The C99 floating-point complex types are also considered
+   as composite types, according to AAPCS64 \S 7.1.1.  The complex integer
+   types, which are GCC extensions and out of the scope of AAPCS64, are
+   treated as composite types here as well.
+
+   Note that MODE itself is not sufficient in determining whether a type
+   is such a composite type or not.  This is because
+   stor-layout.c:compute_record_mode may have already changed the MODE
+   (BLKmode) of a RECORD_TYPE TYPE to some other mode.  For example, a
+   structure with only one field may have its MODE set to the mode of the
+   field.  Also an integer mode whose size matches the size of the
+   RECORD_TYPE type may be used to substitute the original mode
+   (i.e. BLKmode) in certain circumstances.  In other words, MODE cannot be
+   solely relied on.  */
+
+static bool
+aarch64_composite_type_p (const_tree type,
+                         enum machine_mode mode)
+{
+  if (type && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE))
+    return true;
+
+  if (mode == BLKmode
+      || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
+      || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
+    return true;
+
+  return false;
+}
+
+/* Return TRUE if the type, as described by TYPE and MODE, is a short vector
+   type as described in AAPCS64 \S 4.1.2.
+
+   See the comment above aarch64_composite_type_p for the notes on MODE.  */
+
+static bool
+aarch64_short_vector_p (const_tree type,
+                       enum machine_mode mode)
+{
+  HOST_WIDE_INT size = -1;
+
+  if (type && TREE_CODE (type) == VECTOR_TYPE)
+    size = int_size_in_bytes (type);
+  else if (!aarch64_composite_type_p (type, mode)
+          && (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
+              || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT))
+    size = GET_MODE_SIZE (mode);
+
+  return size == 8 || size == 16;
+}
+
+/* Return TRUE if an argument, whose type is described by TYPE and MODE,
+   shall be passed or returned in simd/fp register(s) (providing these
+   parameter passing registers are available).
+
+   Upon successful return, *COUNT returns the number of needed registers,
+   *BASE_MODE returns the mode of the individual register, and when IS_HA
+   is not NULL, *IS_HA indicates whether or not the argument is a homogeneous
+   floating-point aggregate or a homogeneous short-vector aggregate.  */
+
+static bool
+aarch64_vfp_is_call_or_return_candidate (enum machine_mode mode,
+                                        const_tree type,
+                                        enum machine_mode *base_mode,
+                                        int *count,
+                                        bool *is_ha)
+{
+  enum machine_mode new_mode = VOIDmode;
+  bool composite_p = aarch64_composite_type_p (type, mode);
+
+  if (is_ha != NULL) *is_ha = false;
+
+  if ((!composite_p && GET_MODE_CLASS (mode) == MODE_FLOAT)
+      || aarch64_short_vector_p (type, mode))
+    {
+      *count = 1;
+      new_mode = mode;
+    }
+  else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+    {
+      if (is_ha != NULL) *is_ha = true;
+      *count = 2;
+      new_mode = GET_MODE_INNER (mode);
+    }
+  else if (type && composite_p)
+    {
+      int ag_count = aapcs_vfp_sub_candidate (type, &new_mode);
+
+      if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
+       {
+         if (is_ha != NULL) *is_ha = true;
+         *count = ag_count;
+       }
+      else
+       return false;
+    }
+  else
+    return false;
+
+  *base_mode = new_mode;
+  return true;
+}
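+
+/* For example, a plain "double" gives *COUNT = 1 with *BASE_MODE = DFmode,
+   "_Complex float" gives *COUNT = 2 with *BASE_MODE = SFmode and *IS_HA
+   set, and a structure of four doubles gives *COUNT = 4; a structure of
+   five doubles is rejected, assuming HA_MAX_NUM_FLDS is 4 as the AAPCS64
+   homogeneous aggregate rules require.  */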
+
+/* Implement TARGET_STRUCT_VALUE_RTX.  */
+
+static rtx
+aarch64_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
+                         int incoming ATTRIBUTE_UNUSED)
+{
+  return gen_rtx_REG (Pmode, AARCH64_STRUCT_VALUE_REGNUM);
+}
+
+/* Implement TARGET_VECTOR_MODE_SUPPORTED_P.  */
+static bool
+aarch64_vector_mode_supported_p (enum machine_mode mode)
+{
+  if (TARGET_SIMD
+      && (mode == V4SImode  || mode == V8HImode
+         || mode == V16QImode || mode == V2DImode
+         || mode == V2SImode  || mode == V4HImode
+         || mode == V8QImode || mode == V2SFmode
+         || mode == V4SFmode || mode == V2DFmode))
+    return true;
+
+  return false;
+}
+
+/* Return quad mode as the preferred SIMD mode.  */
+static enum machine_mode
+aarch64_preferred_simd_mode (enum machine_mode mode)
+{
+  if (TARGET_SIMD)
+    switch (mode)
+      {
+      case DFmode:
+        return V2DFmode;
+      case SFmode:
+        return V4SFmode;
+      case SImode:
+        return V4SImode;
+      case HImode:
+        return V8HImode;
+      case QImode:
+        return V16QImode;
+      case DImode:
+        return V2DImode;
+
+      default:;
+      }
+  return word_mode;
+}
+
+/* Legitimize a memory reference for a sync primitive implemented using
+   LDXR/STXR instructions.  We currently force the form of the reference
+   to be indirect without offset.  */
+static rtx
+aarch64_legitimize_sync_memory (rtx memory)
+{
+  rtx addr = force_reg (Pmode, XEXP (memory, 0));
+  rtx legitimate_memory = gen_rtx_MEM (GET_MODE (memory), addr);
+
+  set_mem_alias_set (legitimate_memory, ALIAS_SET_MEMORY_BARRIER);
+  MEM_VOLATILE_P (legitimate_memory) = MEM_VOLATILE_P (memory);
+  return legitimate_memory;
+}
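+
+/* For example, a reference such as '[x0, #16]' has its full address
+   computed into a fresh register first, so the LDXR/STXR operand is
+   always a plain '[xN]'.  */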
+
+/* An instruction emitter.  */
+typedef void (* emit_f) (int label, const char *, rtx *);
+
+/* An instruction emitter that emits via the conventional
+   output_asm_insn.  */
+static void
+aarch64_emit (int label ATTRIBUTE_UNUSED, const char *pattern, rtx *operands)
+{
+  output_asm_insn (pattern, operands);
+}
+
+/* Count the number of emitted synchronization instructions.  */
+static unsigned aarch64_insn_count;
+
+/* An emitter that counts emitted instructions but does not actually
+   emit instructions into the instruction stream.  */
+static void
+aarch64_count (int label,
+              const char *pattern ATTRIBUTE_UNUSED,
+              rtx *operands ATTRIBUTE_UNUSED)
+{
+  if (! label)
+    ++ aarch64_insn_count;
+}
+
+static void
+aarch64_output_asm_insn (emit_f, int, rtx *,
+                        const char *, ...) ATTRIBUTE_PRINTF_4;
+
+/* Construct a pattern using conventional output formatting and feed
+   it to output_asm_insn.  Provides a mechanism to construct the
+   output pattern on the fly.  Note the hard limit on the pattern
+   buffer size.  */
+static void
+aarch64_output_asm_insn (emit_f emit, int label, rtx *operands,
+                        const char *pattern, ...)
+{
+  va_list ap;
+  char buffer[256];
+
+  va_start (ap, pattern);
+  vsnprintf (buffer, sizeof (buffer), pattern, ap);
+  va_end (ap);
+  emit (label, buffer, operands);
+}
+
+/* Helper to figure out the instruction suffix required on LDXR/STXR
+   instructions for operations on an object of the specified mode.  */
+static const char *
+aarch64_load_store_suffix (enum machine_mode mode)
+{
+  switch (mode)
+    {
+    case QImode: return "b";
+    case HImode: return "h";
+    case SImode: return "";
+    case DImode: return "";
+    default:
+      gcc_unreachable ();
+    }
+  return "";
+}
+
+/* Emit an exclusive load instruction appropriate for the specified
+   mode.  */
+static void
+aarch64_output_sync_load (emit_f emit,
+                         enum machine_mode mode,
+                         rtx target,
+                         rtx memory,
+                         bool with_barrier)
+{
+  const char *suffix = aarch64_load_store_suffix (mode);
+  rtx operands[2];
+
+  operands[0] = target;
+  operands[1] = memory;
+  aarch64_output_asm_insn (emit, 0, operands, "ld%sxr%s\t%%%s0, %%1",
+                          with_barrier ? "a" : "", suffix,
+                          mode == DImode ? "x" : "w");
+}
+
+/* Emit an exclusive store instruction appropriate for the specified
+   mode.  */
+static void
+aarch64_output_sync_store (emit_f emit,
+                          enum machine_mode mode,
+                          rtx result,
+                          rtx value,
+                          rtx memory,
+                          bool with_barrier)
+{
+  const char *suffix = aarch64_load_store_suffix (mode);
+  rtx operands[3];
+
+  operands[0] = result;
+  operands[1] = value;
+  operands[2] = memory;
+  aarch64_output_asm_insn (emit, 0, operands,
+                          "st%sxr%s\t%%w0, %%%s1, %%2",
+                          with_barrier ? "l" : "",
+                          suffix,
+                          mode == DImode ? "x" : "w");
+}
+
+/* Helper to emit a two operand instruction.  */
+static void
+aarch64_output_op2 (emit_f emit, const char *mnemonic, rtx d, rtx s)
+{
+  rtx operands[2];
+  enum machine_mode mode;
+  const char *constraint;
+
+  mode = GET_MODE (d);
+  operands[0] = d;
+  operands[1] = s;
+  constraint = mode == DImode ? "" : "w";
+  aarch64_output_asm_insn (emit, 0, operands, "%s\t%%%s0, %%%s1", mnemonic,
+                          constraint, constraint);
+}
+
+/* Helper to emit a three operand instruction.  */
+static void
+aarch64_output_op3 (emit_f emit, const char *mnemonic, rtx d, rtx a, rtx b)
+{
+  rtx operands[3];
+  enum machine_mode mode;
+  const char *constraint;
+
+  mode = GET_MODE (d);
+  operands[0] = d;
+  operands[1] = a;
+  operands[2] = b;
+
+  constraint = mode == DImode ? "" : "w";
+  aarch64_output_asm_insn (emit, 0, operands, "%s\t%%%s0, %%%s1, %%%s2",
+                          mnemonic, constraint, constraint, constraint);
+}
+
+/* Emit a load store exclusive synchronization loop.
+
+   do
+     old_value = [mem]
+     if old_value != required_value
+       break;
+     t1 = sync_op (old_value, new_value)
+     [mem] = t1, t2 = [0|1]
+   while ! t2
+
+   Note:
+     t1 == t2 is not permitted
+     t1 == old_value is permitted
+
+   required_value:
+
+   An RTX register or const_int representing the required old_value for
+   the modify to continue; if NULL, no comparison is performed.  */
+static void
+aarch64_output_sync_loop (emit_f emit,
+                         enum machine_mode mode,
+                         rtx old_value,
+                         rtx memory,
+                         rtx required_value,
+                         rtx new_value,
+                         rtx t1,
+                         rtx t2,
+                         enum attr_sync_op sync_op,
+                         int acquire_barrier,
+                         int release_barrier)
+{
+  rtx operands[1];
+
+  gcc_assert (t1 != t2);
+
+  aarch64_output_asm_insn (emit, 1, operands, "%sLSYT%%=:", LOCAL_LABEL_PREFIX);
+
+  aarch64_output_sync_load (emit, mode, old_value, memory, acquire_barrier);
+
+  if (required_value)
+    {
+      rtx operands[2];
+
+      operands[0] = old_value;
+      operands[1] = required_value;
+      aarch64_output_asm_insn (emit, 0, operands, "cmp\t%%0, %%1");
+      aarch64_output_asm_insn (emit, 0, operands, "bne\t%sLSYB%%=",
+                              LOCAL_LABEL_PREFIX);
+    }
+
+  switch (sync_op)
+    {
+    case SYNC_OP_ADD:
+      aarch64_output_op3 (emit, "add", t1, old_value, new_value);
+      break;
+
+    case SYNC_OP_SUB:
+      aarch64_output_op3 (emit, "sub", t1, old_value, new_value);
+      break;
+
+    case SYNC_OP_IOR:
+      aarch64_output_op3 (emit, "orr", t1, old_value, new_value);
+      break;
+
+    case SYNC_OP_XOR:
+      aarch64_output_op3 (emit, "eor", t1, old_value, new_value);
+      break;
+
+    case SYNC_OP_AND:
+      aarch64_output_op3 (emit, "and", t1, old_value, new_value);
+      break;
+
+    case SYNC_OP_NAND:
+      aarch64_output_op3 (emit, "and", t1, old_value, new_value);
+      aarch64_output_op2 (emit, "mvn", t1, t1);
+      break;
+
+    case SYNC_OP_NONE:
+      t1 = new_value;
+      break;
+    }
+
+  aarch64_output_sync_store (emit, mode, t2, t1, memory, release_barrier);
+  operands[0] = t2;
+  aarch64_output_asm_insn (emit, 0, operands, "cbnz\t%%w0, %sLSYT%%=",
+                          LOCAL_LABEL_PREFIX);
+
+  aarch64_output_asm_insn (emit, 1, operands, "%sLSYB%%=:", LOCAL_LABEL_PREFIX);
+}
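+
+/* For illustration, a SImode fetch-and-add with a release barrier is
+   emitted as a loop of roughly this shape (register numbers are
+   arbitrary):
+
+       .LSYTn:
+         ldaxr   w1, [x0]
+         add     w2, w1, w3
+         stlxr   w4, w2, [x0]
+         cbnz    w4, .LSYTn  */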
+
+static rtx
+aarch64_get_sync_operand (rtx *operands, int index, rtx default_value)
+{
+  if (index > 0)
+    default_value = operands[index - 1];
+
+  return default_value;
+}
+
+#define FETCH_SYNC_OPERAND(NAME, DEFAULT)                                \
+  aarch64_get_sync_operand (operands, (int) get_attr_sync_##NAME (insn), \
+                           DEFAULT);
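+
+/* A sync_* attribute value of zero means the operand is absent and the
+   supplied DEFAULT is used; otherwise the value is a one-based index
+   into OPERANDS.  */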
+
+/* Extract the operands for a synchronization instruction from the
+   instruction's attributes and emit the instruction.  */
+static void
+aarch64_process_output_sync_insn (emit_f emit, rtx insn, rtx *operands)
+{
+  rtx result, memory, required_value, new_value, t1, t2;
+  int release_barrier;
+  int acquire_barrier = 1;
+  enum machine_mode mode;
+  enum attr_sync_op sync_op;
+
+  result = FETCH_SYNC_OPERAND (result, 0);
+  memory = FETCH_SYNC_OPERAND (memory, 0);
+  required_value = FETCH_SYNC_OPERAND (required_value, 0);
+  new_value = FETCH_SYNC_OPERAND (new_value, 0);
+  t1 = FETCH_SYNC_OPERAND (t1, 0);
+  t2 = FETCH_SYNC_OPERAND (t2, 0);
+  release_barrier =
+    get_attr_sync_release_barrier (insn) == SYNC_RELEASE_BARRIER_YES;
+  sync_op = get_attr_sync_op (insn);
+  mode = GET_MODE (memory);
+
+  aarch64_output_sync_loop (emit, mode, result, memory, required_value,
+                           new_value, t1, t2, sync_op, acquire_barrier,
+                           release_barrier);
+}
+
+/* Emit a synchronization instruction loop.  */
+const char *
+aarch64_output_sync_insn (rtx insn, rtx *operands)
+{
+  aarch64_process_output_sync_insn (aarch64_emit, insn, operands);
+  return "";
+}
+
+/* Emit a store release instruction appropriate for the specified
+   mode.  */
+const char *
+aarch64_output_sync_lock_release (rtx value, rtx memory)
+{
+  const char *suffix;
+  enum machine_mode mode;
+  rtx operands[2];
+  operands[0] = value;
+  operands[1] = memory;
+  mode = GET_MODE (memory);
+  suffix = aarch64_load_store_suffix (mode);
+  aarch64_output_asm_insn (aarch64_emit, 0, operands,
+                          "stlr%s\t%%%s0, %%1",
+                          suffix,
+                          mode == DImode ? "x" : "w");
+  return "";
+}
+
+/* Count the number of machine instructions that will be emitted for a
+   synchronization instruction.  Note that the emitter used does not
+   emit instructions; it just counts them, being careful not to count
+   labels.  */
+unsigned int
+aarch64_sync_loop_insns (rtx insn, rtx *operands)
+{
+  aarch64_insn_count = 0;
+  aarch64_process_output_sync_insn (aarch64_count, insn, operands);
+  return aarch64_insn_count;
+}
+
+/* Helper to call a target sync instruction generator, dealing with
+   the variation in operands required by the different generators.  */
+static rtx
+aarch64_call_generator (struct aarch64_sync_generator *generator, rtx old_value,
+                       rtx memory, rtx required_value, rtx new_value)
+{
+  switch (generator->op)
+    {
+    case aarch64_sync_generator_omn:
+      gcc_assert (! required_value);
+      return generator->u.omn (old_value, memory, new_value);
+
+    case aarch64_sync_generator_omrn:
+      gcc_assert (required_value);
+      return generator->u.omrn (old_value, memory, required_value, new_value);
+    }
+
+  return NULL;
+}
+
+/* Expand a synchronization loop.  The synchronization loop is
+   expanded as an opaque block of instructions in order to ensure that
+   we do not subsequently get extraneous memory accesses inserted
+   within the critical region.  The exclusive access property of
+   LDXR/STXR instructions is only guaranteed if there are no intervening
+   memory accesses.  */
+void
+aarch64_expand_sync (enum machine_mode mode,
+                    struct aarch64_sync_generator *generator,
+                    rtx target, rtx memory, rtx required_value, rtx new_value)
+{
+  if (target == NULL)
+    target = gen_reg_rtx (mode);
+
+  memory = aarch64_legitimize_sync_memory (memory);
+  if (mode != SImode && mode != DImode)
+    {
+      rtx load_temp = gen_reg_rtx (SImode);
+
+      if (required_value)
+       required_value = convert_modes (SImode, mode, required_value, true);
+
+      new_value = convert_modes (SImode, mode, new_value, true);
+      emit_insn (aarch64_call_generator (generator, load_temp, memory,
+                                        required_value, new_value));
+      emit_move_insn (target, gen_lowpart (mode, load_temp));
+    }
+  else
+    {
+      emit_insn (aarch64_call_generator (generator, target, memory,
+                                        required_value, new_value));
+    }
+}
+
+/* Return the equivalent letter for size.  */
+static unsigned char
+sizetochar (int size)
+{
+  switch (size)
+    {
+    case 64: return 'd';
+    case 32: return 's';
+    case 16: return 'h';
+    case 8 : return 'b';
+    default: gcc_unreachable ();
+    }
+}
+
+static int
+aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, int inverse,
+                             rtx *modconst, int *elementwidth,
+                             unsigned char *elementchar,
+                             int *mvn, int *shift)
+{
+#define CHECK(STRIDE, ELSIZE, CLASS, TEST, SHIFT, NEG) \
+  matches = 1;                                         \
+  for (i = 0; i < idx; i += (STRIDE))                  \
+    if (!(TEST))                                       \
+      matches = 0;                                     \
+  if (matches)                                         \
+    {                                                  \
+      immtype = (CLASS);                               \
+      elsize = (ELSIZE);                               \
+      elchar = sizetochar (elsize);                    \
+      eshift = (SHIFT);                                        \
+      emvn = (NEG);                                    \
+      break;                                           \
+    }
+
+  unsigned int i, elsize = 0, idx = 0, n_elts = CONST_VECTOR_NUNITS (op);
+  unsigned int innersize = GET_MODE_SIZE (GET_MODE_INNER (mode));
+  unsigned char bytes[16];
+  unsigned char elchar = 0;
+  int immtype = -1, matches;
+  unsigned int invmask = inverse ? 0xff : 0;
+  int eshift, emvn;
+
+  /* TODO: Vectors of float constants.  */
+  if (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
+    return -1;
+
+  /* Splat vector constant out into a byte vector.  */
+  for (i = 0; i < n_elts; i++)
+    {
+      rtx el = CONST_VECTOR_ELT (op, i);
+      unsigned HOST_WIDE_INT elpart;
+      unsigned int part, parts;
+
+      if (GET_CODE (el) == CONST_INT)
+        {
+          elpart = INTVAL (el);
+          parts = 1;
+        }
+      else if (GET_CODE (el) == CONST_DOUBLE)
+        {
+          elpart = CONST_DOUBLE_LOW (el);
+          parts = 2;
+        }
+      else
+        gcc_unreachable ();
+
+      for (part = 0; part < parts; part++)
+        {
+          unsigned int byte;
+          for (byte = 0; byte < innersize; byte++)
+            {
+              bytes[idx++] = (elpart & 0xff) ^ invmask;
+              elpart >>= BITS_PER_UNIT;
+            }
+          if (GET_CODE (el) == CONST_DOUBLE)
+            elpart = CONST_DOUBLE_HIGH (el);
+        }
+    }
+
+  /* Sanity check.  */
+  gcc_assert (idx == GET_MODE_SIZE (mode));
+
+  do
+    {
+      CHECK (4, 32, 0, bytes[i] == bytes[0] && bytes[i + 1] == 0
+            && bytes[i + 2] == 0 && bytes[i + 3] == 0, 0, 0);
+
+      CHECK (4, 32, 1, bytes[i] == 0 && bytes[i + 1] == bytes[1]
+            && bytes[i + 2] == 0 && bytes[i + 3] == 0, 8, 0);
+
+      CHECK (4, 32, 2, bytes[i] == 0 && bytes[i + 1] == 0
+            && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0, 16, 0);
+
+      CHECK (4, 32, 3, bytes[i] == 0 && bytes[i + 1] == 0
+            && bytes[i + 2] == 0 && bytes[i + 3] == bytes[3], 24, 0);
+
+      CHECK (2, 16, 4, bytes[i] == bytes[0] && bytes[i + 1] == 0, 0, 0);
+
+      CHECK (2, 16, 5, bytes[i] == 0 && bytes[i + 1] == bytes[1], 8, 0);
+
+      CHECK (4, 32, 6, bytes[i] == bytes[0] && bytes[i + 1] == 0xff
+            && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 0, 1);
+
+      CHECK (4, 32, 7, bytes[i] == 0xff && bytes[i + 1] == bytes[1]
+            && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 8, 1);
+
+      CHECK (4, 32, 8, bytes[i] == 0xff && bytes[i + 1] == 0xff
+            && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff, 16, 1);
+
+      CHECK (4, 32, 9, bytes[i] == 0xff && bytes[i + 1] == 0xff
+            && bytes[i + 2] == 0xff && bytes[i + 3] == bytes[3], 24, 1);
+
+      CHECK (2, 16, 10, bytes[i] == bytes[0] && bytes[i + 1] == 0xff, 0, 1);
+
+      CHECK (2, 16, 11, bytes[i] == 0xff && bytes[i + 1] == bytes[1], 8, 1);
+
+      CHECK (4, 32, 12, bytes[i] == 0xff && bytes[i + 1] == bytes[1]
+            && bytes[i + 2] == 0 && bytes[i + 3] == 0, 0, 0);
+
+      CHECK (4, 32, 13, bytes[i] == 0 && bytes[i + 1] == bytes[1]
+            && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 0, 1);
+
+      CHECK (4, 32, 14, bytes[i] == 0xff && bytes[i + 1] == 0xff
+            && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0, 0, 0);
+
+      CHECK (4, 32, 15, bytes[i] == 0 && bytes[i + 1] == 0
+            && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff, 0, 1);
+
+      CHECK (1, 8, 16, bytes[i] == bytes[0], 0, 0);
+
+      CHECK (1, 64, 17, (bytes[i] == 0 || bytes[i] == 0xff)
+            && bytes[i] == bytes[(i + 8) % idx], 0, 0);
+    }
+  while (0);
+
+  /* TODO: Currently the assembler cannot handle types 12 to 15.
+     There is also no way to specify the cmode through the compiler.
+     Disable them until the assembler supports them.  */
+  if (immtype == -1
+      || (immtype >= 12 && immtype <= 15)
+      || immtype == 18)
+    return -1;
+
+
+  if (elementwidth)
+    *elementwidth = elsize;
+
+  if (elementchar)
+    *elementchar = elchar;
+
+  if (mvn)
+    *mvn = emvn;
+
+  if (shift)
+    *shift = eshift;
+
+  if (modconst)
+    {
+      unsigned HOST_WIDE_INT imm = 0;
+
+      /* Un-invert bytes of recognized vector, if necessary.  */
+      if (invmask != 0)
+        for (i = 0; i < idx; i++)
+          bytes[i] ^= invmask;
+
+      if (immtype == 17)
+        {
+          /* FIXME: Broken on 32-bit H_W_I hosts.  */
+          gcc_assert (sizeof (HOST_WIDE_INT) == 8);
+
+          for (i = 0; i < 8; i++)
+            imm |= (unsigned HOST_WIDE_INT) (bytes[i] ? 0xff : 0)
+             << (i * BITS_PER_UNIT);
+
+          *modconst = GEN_INT (imm);
+        }
+      else
+        {
+          unsigned HOST_WIDE_INT imm = 0;
+
+          for (i = 0; i < elsize / BITS_PER_UNIT; i++)
+            imm |= (unsigned HOST_WIDE_INT) bytes[i] << (i * BITS_PER_UNIT);
+
+         /* Construct 'abcdefgh' because the assembler cannot handle
+            generic constants.  */
+         gcc_assert (shift != NULL && mvn != NULL);
+         if (*mvn)
+           imm = ~imm;
+         imm = (imm >> *shift) & 0xff;
+          *modconst = GEN_INT (imm);
+        }
+    }
+
+  return immtype;
+#undef CHECK
+}
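+
+/* For example, a V4SImode constant with 0x00ff0000 in every lane splats
+   to the byte pattern { 00, 00, ff, 00, ... } and matches immediate
+   class 2 above: element size 32, shift 16, no MVN, so it can be output
+   as a MOVI of 0xff with a left shift of 16.  */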
+
+/* Return TRUE if rtx X is legal for use as an AdvSIMD MOVI (or, implicitly,
+   MVNI) instruction immediate.  Write back the width per element
+   to *ELEMENTWIDTH (or zero for float elements), and a modified constant
+   (whatever should be output for a MOVI instruction) in *MODCONST.  */
+int
+aarch64_simd_immediate_valid_for_move (rtx op, enum machine_mode mode,
+                                      rtx *modconst, int *elementwidth,
+                                      unsigned char *elementchar,
+                                      int *mvn, int *shift)
+{
+  rtx tmpconst;
+  int tmpwidth;
+  unsigned char tmpwidthc;
+  int tmpmvn = 0, tmpshift = 0;
+  int retval = aarch64_simd_valid_immediate (op, mode, 0, &tmpconst,
+                                            &tmpwidth, &tmpwidthc,
+                                            &tmpmvn, &tmpshift);
+
+  if (retval == -1)
+    return 0;
+
+  if (modconst)
+    *modconst = tmpconst;
+
+  if (elementwidth)
+    *elementwidth = tmpwidth;
+
+  if (elementchar)
+    *elementchar = tmpwidthc;
+
+  if (mvn)
+    *mvn = tmpmvn;
+
+  if (shift)
+    *shift = tmpshift;
+
+  return 1;
+}
+
+static bool
+aarch64_const_vec_all_same_int_p (rtx x,
+                                 HOST_WIDE_INT minval,
+                                 HOST_WIDE_INT maxval)
+{
+  HOST_WIDE_INT firstval;
+  int count, i;
+
+  if (GET_CODE (x) != CONST_VECTOR
+      || GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_INT)
+    return false;
+
+  firstval = INTVAL (CONST_VECTOR_ELT (x, 0));
+  if (firstval < minval || firstval > maxval)
+    return false;
+
+  count = CONST_VECTOR_NUNITS (x);
+  for (i = 1; i < count; i++)
+    if (INTVAL (CONST_VECTOR_ELT (x, i)) != firstval)
+      return false;
+
+  return true;
+}
+
+/* Check if immediate shift constants are within range.  */
+bool
+aarch64_simd_shift_imm_p (rtx x, enum machine_mode mode, bool left)
+{
+  int bit_width = GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT;
+  if (left)
+    return aarch64_const_vec_all_same_int_p (x, 0, bit_width - 1);
+  else
+    return aarch64_const_vec_all_same_int_p (x, 1, bit_width);
+}
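+
+/* For example, with 32-bit vector elements a constant left-shift amount
+   must lie in [0, 31], while a constant right-shift amount must lie in
+   [1, 32].  */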
+
+bool
+aarch64_simd_imm_zero_p (rtx x, enum machine_mode mode)
+{
+  int nunits;
+  int i;
+
+  if (GET_CODE (x) != CONST_VECTOR)
+    return false;
+
+  nunits = GET_MODE_NUNITS (mode);
+
+  for (i = 0; i < nunits; i++)
+    if (INTVAL (CONST_VECTOR_ELT (x, i)) != 0)
+      return false;
+
+  return true;
+}
+
+bool
+aarch64_simd_imm_scalar_p (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+  HOST_WIDE_INT imm = INTVAL (x);
+  int i;
+
+  for (i = 0; i < 8; i++)
+    {
+      unsigned int byte = imm & 0xff;
+      if (byte != 0xff && byte != 0)
+       return false;
+      imm >>= 8;
+    }
+
+  return true;
+}
+
+/* Return a constant vector whose elements are all VAL.  */
+rtx
+aarch64_simd_gen_const_vector_dup (enum machine_mode mode, int val)
+{
+  int nunits = GET_MODE_NUNITS (mode);
+  rtvec v = rtvec_alloc (nunits);
+  int i;
+
+  for (i = 0; i < nunits; i++)
+    RTVEC_ELT (v, i) = GEN_INT (val);
+
+  return gen_rtx_CONST_VECTOR (mode, v);
+}
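+
+/* For example, aarch64_simd_gen_const_vector_dup (V4SImode, 3) yields a
+   CONST_VECTOR whose four elements are all (const_int 3).  */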
+
+/* Construct and return a PARALLEL RTX vector numbering the lanes of
+   either the high or the low half of MODE.  */
+rtx
+aarch64_simd_vect_par_cnst_half (enum machine_mode mode, bool high)
+{
+  int nunits = GET_MODE_NUNITS (mode);
+  rtvec v = rtvec_alloc (nunits / 2);
+  int base = high ? nunits / 2 : 0;
+  rtx t1;
+  int i;
+
+  for (i = 0; i < nunits / 2; i++)
+    RTVEC_ELT (v, i) = GEN_INT (base + i);
+
+  t1 = gen_rtx_PARALLEL (mode, v);
+  return t1;
+}
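+
+/* For example, for V4SImode this returns a PARALLEL of (const_int 0)
+   and (const_int 1) when HIGH is false, or of (const_int 2) and
+   (const_int 3) when HIGH is true, selecting the low or high half of
+   the lanes.  */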
+
+/* Bounds-check lanes.  Ensure OPERAND lies between LOW (inclusive) and
+   HIGH (exclusive).  */
+void
+aarch64_simd_lane_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
+{
+  HOST_WIDE_INT lane;
+  gcc_assert (GET_CODE (operand) == CONST_INT);
+  lane = INTVAL (operand);
+
+  if (lane < low || lane >= high)
+    error ("lane out of range");
+}
+
+void
+aarch64_simd_const_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
+{
+  gcc_assert (GET_CODE (operand) == CONST_INT);
+  HOST_WIDE_INT lane = INTVAL (operand);
+
+  if (lane < low || lane >= high)
+    error ("constant out of range");
+}
+
+/* Emit code to reinterpret one AdvSIMD type as another,
+   without altering bits.  */
+void
+aarch64_simd_reinterpret (rtx dest, rtx src)
+{
+  emit_move_insn (dest, gen_lowpart (GET_MODE (dest), src));
+}
+
+/* Emit code to place an AdvSIMD pair result in memory locations (with equal
+   registers).  */
+void
+aarch64_simd_emit_pair_result_insn (enum machine_mode mode,
+                           rtx (*intfn) (rtx, rtx, rtx), rtx destaddr,
+                            rtx op1)
+{
+  rtx mem = gen_rtx_MEM (mode, destaddr);
+  rtx tmp1 = gen_reg_rtx (mode);
+  rtx tmp2 = gen_reg_rtx (mode);
+
+  emit_insn (intfn (tmp1, op1, tmp2));
+
+  emit_move_insn (mem, tmp1);
+  mem = adjust_address (mem, mode, GET_MODE_SIZE (mode));
+  emit_move_insn (mem, tmp2);
+}
+
+/* Return TRUE if OP is a memory operand using a valid vector addressing
+   mode (register indirect or post-increment).  */
+bool
+aarch64_simd_mem_operand_p (rtx op)
+{
+  return MEM_P (op) && (GET_CODE (XEXP (op, 0)) == POST_INC
+                       || GET_CODE (XEXP (op, 0)) == REG);
+}
+
+/* Set up OPERANDS for a register copy from SRC to DEST, taking care
+   not to early-clobber SRC registers in the process.
+
+   We assume that the operands described by SRC and DEST represent a
+   decomposed copy of OPERANDS[1] into OPERANDS[0].  COUNT is the
+   number of components into which the copy has been decomposed.  */
+void
+aarch64_simd_disambiguate_copy (rtx *operands, rtx *dest,
+                               rtx *src, unsigned int count)
+{
+  unsigned int i;
+
+  if (!reg_overlap_mentioned_p (operands[0], operands[1])
+      || REGNO (operands[0]) < REGNO (operands[1]))
+    {
+      for (i = 0; i < count; i++)
+       {
+         operands[2 * i] = dest[i];
+         operands[2 * i + 1] = src[i];
+       }
+    }
+  else
+    {
+      for (i = 0; i < count; i++)
+       {
+         operands[2 * i] = dest[count - i - 1];
+         operands[2 * i + 1] = src[count - i - 1];
+       }
+    }
+}
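+
+/* For example, when an OImode value is copied between overlapping
+   register pairs and the destination starts at a higher register number
+   than the source, the two component moves are emitted in reverse order
+   so that no source register is overwritten before it has been read.  */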
+
+/* Compute and return the length of aarch64_simd_mov<mode>, where <mode> is
+   one of VSTRUCT modes: OI, CI or XI.  */
+int
+aarch64_simd_attr_length_move (rtx insn)
+{
+  rtx reg, mem, addr;
+  int load;
+  enum machine_mode mode;
+
+  extract_insn_cached (insn);
+
+  if (REG_P (recog_data.operand[0]) && REG_P (recog_data.operand[1]))
+    {
+      mode = GET_MODE (recog_data.operand[0]);
+      switch (mode)
+       {
+       case OImode:
+         return 8;
+       case CImode:
+         return 12;
+       case XImode:
+         return 16;
+       default:
+         gcc_unreachable ();
+       }
+    }
+  return 4;
+}
+
+static unsigned HOST_WIDE_INT
+aarch64_shift_truncation_mask (enum machine_mode mode)
+{
+  return
+    (aarch64_vector_mode_supported_p (mode)
+     || aarch64_vect_struct_mode_p (mode)) ? 0 : (GET_MODE_BITSIZE (mode) - 1);
+}
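+
+/* For example, the mask is 63 for DImode, so a variable shift amount
+   need not be masked with an explicit AND 63; vector modes return 0
+   because no such truncation is guaranteed for them.  */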
+
+#ifndef TLS_SECTION_ASM_FLAG
+#define TLS_SECTION_ASM_FLAG 'T'
+#endif
+
+void
+aarch64_elf_asm_named_section (const char *name, unsigned int flags,
+                              tree decl ATTRIBUTE_UNUSED)
+{
+  char flagchars[10], *f = flagchars;
+
+  /* If we have already declared this section, we can use an
+     abbreviated form to switch back to it -- unless this section is
+     part of a COMDAT group, in which case GAS requires the full
+     declaration every time.  */
+  if (!(HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
+      && (flags & SECTION_DECLARED))
+    {
+      fprintf (asm_out_file, "\t.section\t%s\n", name);
+      return;
+    }
+
+  if (!(flags & SECTION_DEBUG))
+    *f++ = 'a';
+  if (flags & SECTION_WRITE)
+    *f++ = 'w';
+  if (flags & SECTION_CODE)
+    *f++ = 'x';
+  if (flags & SECTION_SMALL)
+    *f++ = 's';
+  if (flags & SECTION_MERGE)
+    *f++ = 'M';
+  if (flags & SECTION_STRINGS)
+    *f++ = 'S';
+  if (flags & SECTION_TLS)
+    *f++ = TLS_SECTION_ASM_FLAG;
+  if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
+    *f++ = 'G';
+  *f = '\0';
+
+  fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars);
+
+  if (!(flags & SECTION_NOTYPE))
+    {
+      const char *type;
+      const char *format;
+
+      if (flags & SECTION_BSS)
+       type = "nobits";
+      else
+       type = "progbits";
+
+#ifdef TYPE_OPERAND_FMT
+      format = "," TYPE_OPERAND_FMT;
+#else
+      format = ",@%s";
+#endif
+
+      fprintf (asm_out_file, format, type);
+
+      if (flags & SECTION_ENTSIZE)
+       fprintf (asm_out_file, ",%d", flags & SECTION_ENTSIZE);
+      if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
+       {
+         if (TREE_CODE (decl) == IDENTIFIER_NODE)
+           fprintf (asm_out_file, ",%s,comdat", IDENTIFIER_POINTER (decl));
+         else
+           fprintf (asm_out_file, ",%s,comdat",
+                    IDENTIFIER_POINTER (DECL_COMDAT_GROUP (decl)));
+       }
+    }
+
+  putc ('\n', asm_out_file);
+}
+
+/* Select a format to encode pointers in exception handling data.  */
+int
+aarch64_asm_preferred_eh_data_format (int code ATTRIBUTE_UNUSED, int global)
+{
+   int type;
+   switch (aarch64_cmodel)
+     {
+     case AARCH64_CMODEL_TINY:
+     case AARCH64_CMODEL_TINY_PIC:
+     case AARCH64_CMODEL_SMALL:
+     case AARCH64_CMODEL_SMALL_PIC:
+       /* text+got+data < 4Gb.  4-byte signed relocs are sufficient
+         for everything.  */
+       type = DW_EH_PE_sdata4;
+       break;
+     default:
+       /* No assumptions here.  8-byte relocs required.  */
+       type = DW_EH_PE_sdata8;
+       break;
+     }
+   return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
+}
+
+static void
+aarch64_start_file (void)
+{
+  if (selected_arch)
+    asm_fprintf (asm_out_file, "\t.arch %s\n", selected_arch->name);
+  else if (selected_cpu)
+    asm_fprintf (asm_out_file, "\t.cpu %s\n", selected_cpu->name);
+  default_file_start ();
+}
+
+/* Target hook for c_mode_for_suffix.  */
+static enum machine_mode
+aarch64_c_mode_for_suffix (char suffix)
+{
+  if (suffix == 'q')
+    return TFmode;
+
+  return VOIDmode;
+}
+
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST aarch64_address_cost
+
+/* This hook determines whether unnamed bitfields affect the alignment
+   of the containing structure.  The hook returns true if the structure
+   should inherit the alignment requirements of an unnamed bitfield's
+   type.  */
+#undef TARGET_ALIGN_ANON_BITFIELD
+#define TARGET_ALIGN_ANON_BITFIELD hook_bool_void_true
+
+#undef TARGET_ASM_ALIGNED_DI_OP
+#define TARGET_ASM_ALIGNED_DI_OP "\t.xword\t"
+
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
+
+#undef TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
+
+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK \
+  hook_bool_const_tree_hwi_hwi_const_tree_true
+
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START aarch64_start_file
+
+#undef TARGET_ASM_OUTPUT_MI_THUNK
+#define TARGET_ASM_OUTPUT_MI_THUNK aarch64_output_mi_thunk
+
+#undef TARGET_ASM_SELECT_RTX_SECTION
+#define TARGET_ASM_SELECT_RTX_SECTION aarch64_select_rtx_section
+
+#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
+#define TARGET_ASM_TRAMPOLINE_TEMPLATE aarch64_asm_trampoline_template
+
+#undef TARGET_BUILD_BUILTIN_VA_LIST
+#define TARGET_BUILD_BUILTIN_VA_LIST aarch64_build_builtin_va_list
+
+#undef TARGET_CALLEE_COPIES
+#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_false
+
+#undef TARGET_CAN_ELIMINATE
+#define TARGET_CAN_ELIMINATE aarch64_can_eliminate
+
+#undef TARGET_CANNOT_FORCE_CONST_MEM
+#define TARGET_CANNOT_FORCE_CONST_MEM aarch64_cannot_force_const_mem
+
+#undef TARGET_CONDITIONAL_REGISTER_USAGE
+#define TARGET_CONDITIONAL_REGISTER_USAGE aarch64_conditional_register_usage
+
+/* Only the least significant bit is used for initialization guard
+   variables.  */
+#undef TARGET_CXX_GUARD_MASK_BIT
+#define TARGET_CXX_GUARD_MASK_BIT hook_bool_void_true
+
+#undef TARGET_C_MODE_FOR_SUFFIX
+#define TARGET_C_MODE_FOR_SUFFIX aarch64_c_mode_for_suffix
+
+#ifdef TARGET_BIG_ENDIAN_DEFAULT
+#undef  TARGET_DEFAULT_TARGET_FLAGS
+#define TARGET_DEFAULT_TARGET_FLAGS (MASK_BIG_END)
+#endif
+
+#undef TARGET_CLASS_MAX_NREGS
+#define TARGET_CLASS_MAX_NREGS aarch64_class_max_nregs
+
+#undef  TARGET_EXPAND_BUILTIN
+#define TARGET_EXPAND_BUILTIN aarch64_expand_builtin
+
+#undef TARGET_EXPAND_BUILTIN_VA_START
+#define TARGET_EXPAND_BUILTIN_VA_START aarch64_expand_builtin_va_start
+
+#undef TARGET_FUNCTION_ARG
+#define TARGET_FUNCTION_ARG aarch64_function_arg
+
+#undef TARGET_FUNCTION_ARG_ADVANCE
+#define TARGET_FUNCTION_ARG_ADVANCE aarch64_function_arg_advance
+
+#undef TARGET_FUNCTION_ARG_BOUNDARY
+#define TARGET_FUNCTION_ARG_BOUNDARY aarch64_function_arg_boundary
+
+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
+#define TARGET_FUNCTION_OK_FOR_SIBCALL aarch64_function_ok_for_sibcall
+
+#undef TARGET_FUNCTION_VALUE
+#define TARGET_FUNCTION_VALUE aarch64_function_value
+
+#undef TARGET_FUNCTION_VALUE_REGNO_P
+#define TARGET_FUNCTION_VALUE_REGNO_P aarch64_function_value_regno_p
+
+#undef TARGET_FRAME_POINTER_REQUIRED
+#define TARGET_FRAME_POINTER_REQUIRED aarch64_frame_pointer_required
+
+#undef TARGET_GIMPLIFY_VA_ARG_EXPR
+#define TARGET_GIMPLIFY_VA_ARG_EXPR aarch64_gimplify_va_arg_expr
+
+#undef  TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS  aarch64_init_builtins
+
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P aarch64_legitimate_address_hook_p
+
+#undef TARGET_LEGITIMATE_CONSTANT_P
+#define TARGET_LEGITIMATE_CONSTANT_P aarch64_legitimate_constant_p
+
+#undef TARGET_LIBGCC_CMP_RETURN_MODE
+#define TARGET_LIBGCC_CMP_RETURN_MODE aarch64_libgcc_cmp_return_mode
+
+#undef TARGET_MEMORY_MOVE_COST
+#define TARGET_MEMORY_MOVE_COST aarch64_memory_move_cost
+
+#undef TARGET_MUST_PASS_IN_STACK
+#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
+
+/* This target hook should return true if accesses to volatile bitfields
+   should use the narrowest mode possible.  It should return false if these
+   accesses should use the bitfield container type.  */
+#undef TARGET_NARROW_VOLATILE_BITFIELD
+#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
+
+#undef  TARGET_OPTION_OVERRIDE
+#define TARGET_OPTION_OVERRIDE aarch64_override_options
+
+#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
+#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE \
+  aarch64_override_options_after_change
+
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE aarch64_pass_by_reference
+
+#undef TARGET_PREFERRED_RELOAD_CLASS
+#define TARGET_PREFERRED_RELOAD_CLASS aarch64_preferred_reload_class
+
+#undef TARGET_SECONDARY_RELOAD
+#define TARGET_SECONDARY_RELOAD aarch64_secondary_reload
+
+#undef TARGET_SHIFT_TRUNCATION_MASK
+#define TARGET_SHIFT_TRUNCATION_MASK aarch64_shift_truncation_mask
+
+#undef TARGET_SETUP_INCOMING_VARARGS
+#define TARGET_SETUP_INCOMING_VARARGS aarch64_setup_incoming_varargs
+
+#undef TARGET_STRUCT_VALUE_RTX
+#define TARGET_STRUCT_VALUE_RTX   aarch64_struct_value_rtx
+
+#undef TARGET_REGISTER_MOVE_COST
+#define TARGET_REGISTER_MOVE_COST aarch64_register_move_cost
+
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY aarch64_return_in_memory
+
+#undef TARGET_RETURN_IN_MSB
+#define TARGET_RETURN_IN_MSB aarch64_return_in_msb
+
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS aarch64_rtx_costs
+
+#undef TARGET_TRAMPOLINE_INIT
+#define TARGET_TRAMPOLINE_INIT aarch64_trampoline_init
+
+#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
+#define TARGET_USE_BLOCKS_FOR_CONSTANT_P aarch64_use_blocks_for_constant_p
+
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P aarch64_vector_mode_supported_p
+
+#undef TARGET_ARRAY_MODE_SUPPORTED_P
+#define TARGET_ARRAY_MODE_SUPPORTED_P aarch64_array_mode_supported_p
+
+#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
+#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE aarch64_preferred_simd_mode
+
+/* Section anchor support.  */
+
+#undef TARGET_MIN_ANCHOR_OFFSET
+#define TARGET_MIN_ANCHOR_OFFSET -256
+
+/* Limit the maximum anchor offset to 4k-1, since that's the limit for a
+   byte offset; we can do much more for larger data types, but have no way
+   to determine the size of the access.  We assume accesses are aligned.  */
+#undef TARGET_MAX_ANCHOR_OFFSET
+#define TARGET_MAX_ANCHOR_OFFSET 4095
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+#include "gt-aarch64.h"
diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
new file mode 100644 (file)
index 0000000..3b8b033
--- /dev/null
@@ -0,0 +1,837 @@
+/* Machine description for AArch64 architecture.
+   Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+   Contributed by ARM Ltd.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with GCC; see the file COPYING3.  If not see
+   <http://www.gnu.org/licenses/>.  */
+
+
+#ifndef GCC_AARCH64_H
+#define GCC_AARCH64_H
+
+/* Target CPU builtins.  */
+#define TARGET_CPU_CPP_BUILTINS()                      \
+  do                                                   \
+    {                                                  \
+      builtin_define ("__aarch64__");                  \
+      if (TARGET_BIG_END)                              \
+       builtin_define ("__AARCH64EB__");               \
+      else                                             \
+       builtin_define ("__AARCH64EL__");               \
+                                                       \
+      switch (aarch64_cmodel)                          \
+       {                                               \
+         case AARCH64_CMODEL_TINY:                     \
+         case AARCH64_CMODEL_TINY_PIC:                 \
+           builtin_define ("__AARCH64_CMODEL_TINY__"); \
+           break;                                      \
+         case AARCH64_CMODEL_SMALL:                    \
+         case AARCH64_CMODEL_SMALL_PIC:                \
+           builtin_define ("__AARCH64_CMODEL_SMALL__");\
+           break;                                      \
+         case AARCH64_CMODEL_LARGE:                    \
+           builtin_define ("__AARCH64_CMODEL_LARGE__");        \
+           break;                                      \
+         default:                                      \
+           break;                                      \
+       }                                               \
+                                                       \
+    } while (0)
+
+\f
+
+/* Target machine storage layout.  */
+
+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE)    \
+  if (GET_MODE_CLASS (MODE) == MODE_INT                \
+      && GET_MODE_SIZE (MODE) < 4)             \
+    {                                          \
+      if (MODE == QImode || MODE == HImode)    \
+       {                                       \
+         MODE = SImode;                        \
+       }                                       \
+    }
+
+/* Bits are always numbered from the LSBit.  */
+#define BITS_BIG_ENDIAN 0
+
+/* Big/little-endian flavour.  */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)
+
+/* AdvSIMD is supported in the default configuration, unless disabled by
+   -mgeneral-regs-only.  */
+#define TARGET_SIMD !TARGET_GENERAL_REGS_ONLY
+#define TARGET_FLOAT !TARGET_GENERAL_REGS_ONLY
+
+#define UNITS_PER_WORD         8
+
+#define UNITS_PER_VREG         16
+
+#define PARM_BOUNDARY          64
+
+#define STACK_BOUNDARY         128
+
+#define FUNCTION_BOUNDARY      32
+
+#define EMPTY_FIELD_BOUNDARY   32
+
+#define BIGGEST_ALIGNMENT      128
+
+#define SHORT_TYPE_SIZE                16
+
+#define INT_TYPE_SIZE          32
+
+#define LONG_TYPE_SIZE         64      /* XXX This should be an option */
+
+#define LONG_LONG_TYPE_SIZE    64
+
+#define FLOAT_TYPE_SIZE                32
+
+#define DOUBLE_TYPE_SIZE       64
+
+#define LONG_DOUBLE_TYPE_SIZE  128
+
+/* The architecture reserves all bits of the address for hardware use,
+   so the vbit must go into the delta field of pointers to member
+   functions.  This is the same config as that in the AArch32
+   port.  */
+#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_delta
+
+/* Make strings word-aligned so that strcpy from constants will be
+   faster.  */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN)         \
+  ((TREE_CODE (EXP) == STRING_CST              \
+    && !optimize_size                          \
+    && (ALIGN) < BITS_PER_WORD)                        \
+   ? BITS_PER_WORD : ALIGN)
+
+#define DATA_ALIGNMENT(EXP, ALIGN)             \
+  ((((ALIGN) < BITS_PER_WORD)                  \
+    && (TREE_CODE (EXP) == ARRAY_TYPE          \
+       || TREE_CODE (EXP) == UNION_TYPE        \
+       || TREE_CODE (EXP) == RECORD_TYPE))     \
+   ? BITS_PER_WORD : (ALIGN))
+
+#define LOCAL_ALIGNMENT(EXP, ALIGN) DATA_ALIGNMENT(EXP, ALIGN)
+
+#define STRUCTURE_SIZE_BOUNDARY                8
+
+/* Defined by the ABI */
+#define WCHAR_TYPE "unsigned int"
+#define WCHAR_TYPE_SIZE                        32
+
+/* Using long long breaks -ansi and -std=c90, so these will need to be
+   made conditional for an LLP64 ABI.  */
+
+#define SIZE_TYPE      "long unsigned int"
+
+#define PTRDIFF_TYPE   "long int"
+
+#define PCC_BITFIELD_TYPE_MATTERS      1
+
+
+/* Instruction tuning/selection flags.  */
+
+/* Bit values used to identify processor capabilities.  */
+#define AARCH64_FL_SIMD       (1 << 0) /* Has SIMD instructions.  */
+#define AARCH64_FL_FP         (1 << 1) /* Has FP.  */
+#define AARCH64_FL_CRYPTO     (1 << 2) /* Has crypto.  */
+#define AARCH64_FL_SLOWMUL    (1 << 3) /* A slow multiply core.  */
+
+/* Has FP and SIMD.  */
+#define AARCH64_FL_FPSIMD     (AARCH64_FL_FP | AARCH64_FL_SIMD)
+
+/* Has FP without SIMD.  */
+#define AARCH64_FL_FPQ16      (AARCH64_FL_FP & ~AARCH64_FL_SIMD)
+
+/* Architecture flags that affect instruction selection.  */
+#define AARCH64_FL_FOR_ARCH8       (AARCH64_FL_FPSIMD)
+
+/* Macros to test ISA flags.  */
+extern unsigned long aarch64_isa_flags;
+#define AARCH64_ISA_CRYPTO         (aarch64_isa_flags & AARCH64_FL_CRYPTO)
+#define AARCH64_ISA_FP             (aarch64_isa_flags & AARCH64_FL_FP)
+#define AARCH64_ISA_SIMD           (aarch64_isa_flags & AARCH64_FL_SIMD)
+
+/* Macros to test tuning flags.  */
+extern unsigned long aarch64_tune_flags;
+#define AARCH64_TUNE_SLOWMUL       (aarch64_tune_flags & AARCH64_FL_SLOWMUL)
+
+
+/* Standard register usage.  */
+
+/* 31 64-bit general purpose registers R0-R30:
+   R30         LR (link register)
+   R29         FP (frame pointer)
+   R19-R28     Callee-saved registers
+   R18         The platform register; use as temporary register.
+   R17         IP1 The second intra-procedure-call temporary register
+               (can be used by call veneers and PLT code); otherwise use
+               as a temporary register
+   R16         IP0 The first intra-procedure-call temporary register (can
+               be used by call veneers and PLT code); otherwise use as a
+               temporary register
+   R9-R15      Temporary registers
+   R8          Structure value parameter / temporary register
+   R0-R7       Parameter/result registers
+
+   SP          stack pointer, encoded as X/R31 where permitted.
+   ZR          zero register, encoded as X/R31 elsewhere
+
+   32 x 128-bit floating-point/vector registers
+   V16-V31     Caller-saved (temporary) registers
+   V8-V15      Callee-saved registers
+   V0-V7       Parameter/result registers
+
+   The vector register V0 holds scalar B0, H0, S0 and D0 in its least
+   significant bits.  Unlike AArch32 S1 is not packed into D0,
+   etc.  */
+
+/* Note that we don't mark X30 as a call-clobbered register.  The idea is
+   that it's really the call instructions themselves which clobber X30.
+   We don't care what the called function does with it afterwards.
+
+   This approach makes it easier to implement sibcalls.  Unlike normal
+   calls, sibcalls don't clobber X30, so the register reaches the
+   called function intact.  EPILOGUE_USES says that X30 is useful
+   to the called function.  */
+
+#define FIXED_REGISTERS                                        \
+  {                                                    \
+    0, 0, 0, 0,   0, 0, 0, 0,  /* R0 - R7 */           \
+    0, 0, 0, 0,   0, 0, 0, 0,  /* R8 - R15 */          \
+    0, 0, 0, 0,   0, 0, 0, 0,  /* R16 - R23 */         \
+    0, 0, 0, 0,   0, 1, 0, 1,  /* R24 - R30, SP */     \
+    0, 0, 0, 0,   0, 0, 0, 0,   /* V0 - V7 */           \
+    0, 0, 0, 0,   0, 0, 0, 0,   /* V8 - V15 */         \
+    0, 0, 0, 0,   0, 0, 0, 0,   /* V16 - V23 */         \
+    0, 0, 0, 0,   0, 0, 0, 0,   /* V24 - V31 */         \
+    1, 1, 1,                   /* SFP, AP, CC */       \
+  }
+
+#define CALL_USED_REGISTERS                            \
+  {                                                    \
+    1, 1, 1, 1,   1, 1, 1, 1,  /* R0 - R7 */           \
+    1, 1, 1, 1,   1, 1, 1, 1,  /* R8 - R15 */          \
+    1, 1, 1, 0,   0, 0, 0, 0,  /* R16 - R23 */         \
+    0, 0, 0, 0,   0, 1, 0, 1,  /* R24 - R30, SP */     \
+    1, 1, 1, 1,   1, 1, 1, 1,  /* V0 - V7 */           \
+    0, 0, 0, 0,   0, 0, 0, 0,  /* V8 - V15 */          \
+    1, 1, 1, 1,   1, 1, 1, 1,   /* V16 - V23 */         \
+    1, 1, 1, 1,   1, 1, 1, 1,   /* V24 - V31 */         \
+    1, 1, 1,                   /* SFP, AP, CC */       \
+  }
+
+#define REGISTER_NAMES                                         \
+  {                                                            \
+    "x0",  "x1",  "x2",  "x3",  "x4",  "x5",  "x6",  "x7",     \
+    "x8",  "x9",  "x10", "x11", "x12", "x13", "x14", "x15",    \
+    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",    \
+    "x24", "x25", "x26", "x27", "x28", "x29", "x30", "sp",     \
+    "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",  "v7",     \
+    "v8",  "v9",  "v10", "v11", "v12", "v13", "v14", "v15",    \
+    "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",    \
+    "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",    \
+    "sfp", "ap",  "cc",                                                \
+  }
+
+/* Generate the register aliases for core register N.  */
+#define R_ALIASES(N) {"r" # N, R0_REGNUM + (N)}, \
+                     {"w" # N, R0_REGNUM + (N)}
+
+#define V_ALIASES(N) {"q" # N, V0_REGNUM + (N)}, \
+                     {"d" # N, V0_REGNUM + (N)}, \
+                     {"s" # N, V0_REGNUM + (N)}, \
+                     {"h" # N, V0_REGNUM + (N)}, \
+                     {"b" # N, V0_REGNUM + (N)}
+
+/* Provide aliases for all of the ISA defined register name forms.
+   These aliases are convenient for use in the clobber lists of inline
+   asm statements.  */
+
+#define ADDITIONAL_REGISTER_NAMES \
+  { R_ALIASES(0),  R_ALIASES(1),  R_ALIASES(2),  R_ALIASES(3),  \
+    R_ALIASES(4),  R_ALIASES(5),  R_ALIASES(6),  R_ALIASES(7),  \
+    R_ALIASES(8),  R_ALIASES(9),  R_ALIASES(10), R_ALIASES(11), \
+    R_ALIASES(12), R_ALIASES(13), R_ALIASES(14), R_ALIASES(15), \
+    R_ALIASES(16), R_ALIASES(17), R_ALIASES(18), R_ALIASES(19), \
+    R_ALIASES(20), R_ALIASES(21), R_ALIASES(22), R_ALIASES(23), \
+    R_ALIASES(24), R_ALIASES(25), R_ALIASES(26), R_ALIASES(27), \
+    R_ALIASES(28), R_ALIASES(29), R_ALIASES(30), /* 31 omitted  */ \
+    V_ALIASES(0),  V_ALIASES(1),  V_ALIASES(2),  V_ALIASES(3),  \
+    V_ALIASES(4),  V_ALIASES(5),  V_ALIASES(6),  V_ALIASES(7),  \
+    V_ALIASES(8),  V_ALIASES(9),  V_ALIASES(10), V_ALIASES(11), \
+    V_ALIASES(12), V_ALIASES(13), V_ALIASES(14), V_ALIASES(15), \
+    V_ALIASES(16), V_ALIASES(17), V_ALIASES(18), V_ALIASES(19), \
+    V_ALIASES(20), V_ALIASES(21), V_ALIASES(22), V_ALIASES(23), \
+    V_ALIASES(24), V_ALIASES(25), V_ALIASES(26), V_ALIASES(27), \
+    V_ALIASES(28), V_ALIASES(29), V_ALIASES(30), V_ALIASES(31)  \
+  }
+
+/* Say that the epilogue uses the return address register.  Note that
+   in the case of sibcalls, the values "used by the epilogue" are
+   considered live at the start of the called function.  */
+
+#define EPILOGUE_USES(REGNO) \
+  ((REGNO) == LR_REGNUM)
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+   the stack pointer does not matter.  The value is tested only in
+   functions that have frame pointers.  */
+#define EXIT_IGNORE_STACK      1
+
+#define STATIC_CHAIN_REGNUM            R18_REGNUM
+#define HARD_FRAME_POINTER_REGNUM      R29_REGNUM
+#define FRAME_POINTER_REGNUM           SFP_REGNUM
+#define STACK_POINTER_REGNUM           SP_REGNUM
+#define ARG_POINTER_REGNUM             AP_REGNUM
+#define FIRST_PSEUDO_REGISTER          67
+
+/* The number of (integer) argument registers available.  */
+#define NUM_ARG_REGS                   8
+#define NUM_FP_ARG_REGS                        8
+
+/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
+   four members.  */
+#define HA_MAX_NUM_FLDS                4
+
+/* External DWARF register numbering scheme.  These numbers are used to
+   identify registers in DWARF debug information; the values are
+   defined by the AArch64 ABI.  The numbering scheme is independent of
+   GCC's internal register numbering scheme.  */
+
+#define AARCH64_DWARF_R0        0
+
+/* The number of R registers; note 31, not 32.  */
+#define AARCH64_DWARF_NUMBER_R 31
+
+#define AARCH64_DWARF_SP       31
+#define AARCH64_DWARF_V0       64
+
+/* The number of V registers.  */
+#define AARCH64_DWARF_NUMBER_V 32
+
+/* For signal frames we need to use an alternative return column.  This
+   value must not correspond to a hard register and must be out of the
+   range of DWARF_FRAME_REGNUM().  */
+#define DWARF_ALT_FRAME_RETURN_COLUMN   \
+  (AARCH64_DWARF_V0 + AARCH64_DWARF_NUMBER_V)
+
+/* We add 1 extra frame register for use as the
+   DWARF_ALT_FRAME_RETURN_COLUMN.  */
+#define DWARF_FRAME_REGISTERS           (DWARF_ALT_FRAME_RETURN_COLUMN + 1)
+
+
+#define DBX_REGISTER_NUMBER(REGNO)     aarch64_dbx_register_number (REGNO)
+/* Provide a definition of DWARF_FRAME_REGNUM here so that fallback unwinders
+   can use DWARF_ALT_FRAME_RETURN_COLUMN defined above.  This is just the same
+   as the default definition in dwarf2out.c.  */
+#undef DWARF_FRAME_REGNUM
+#define DWARF_FRAME_REGNUM(REGNO)      DBX_REGISTER_NUMBER (REGNO)
+
+#define DWARF_FRAME_RETURN_COLUMN      DWARF_FRAME_REGNUM (LR_REGNUM)
+
+#define HARD_REGNO_NREGS(REGNO, MODE)  aarch64_hard_regno_nregs (REGNO, MODE)
+
+#define HARD_REGNO_MODE_OK(REGNO, MODE)        aarch64_hard_regno_mode_ok (REGNO, MODE)
+
+#define MODES_TIEABLE_P(MODE1, MODE2)                  \
+  (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
+
+#define DWARF2_UNWIND_INFO 1
+
+/* Use R0 through R3 to pass exception handling information.  */
+#define EH_RETURN_DATA_REGNO(N) \
+  ((N) < 4 ? ((unsigned int) R0_REGNUM + (N)) : INVALID_REGNUM)
+
+/* Select a format to encode pointers in exception handling data.  */
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
+  aarch64_asm_preferred_eh_data_format ((CODE), (GLOBAL))
+
+/* The register that holds the return address in exception handlers.  */
+#define AARCH64_EH_STACKADJ_REGNUM     (R0_REGNUM + 4)
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, AARCH64_EH_STACKADJ_REGNUM)
+
+/* Don't use __builtin_setjmp until we've defined it.  */
+#undef DONT_USE_BUILTIN_SETJMP
+#define DONT_USE_BUILTIN_SETJMP 1
+
+/* Register in which the structure value is to be returned.  */
+#define AARCH64_STRUCT_VALUE_REGNUM R8_REGNUM
+
+/* Non-zero if REGNO is part of the Core register set.
+
+   The rather unusual way of expressing this check is to avoid
+   warnings when building the compiler when R0_REGNUM is 0 and REGNO
+   is unsigned.  */
+#define GP_REGNUM_P(REGNO)                                             \
+  (((unsigned) (REGNO - R0_REGNUM)) <= (R30_REGNUM - R0_REGNUM))
+
+#define FP_REGNUM_P(REGNO)                     \
+  (((unsigned) (REGNO - V0_REGNUM)) <= (V31_REGNUM - V0_REGNUM))
+
+#define FP_LO_REGNUM_P(REGNO)            \
+  (((unsigned) (REGNO - V0_REGNUM)) <= (V15_REGNUM - V0_REGNUM))
+
+\f
+/* Register and constant classes.  */
+
+enum reg_class
+{
+  NO_REGS,
+  CORE_REGS,
+  GENERAL_REGS,
+  STACK_REG,
+  POINTER_REGS,
+  FP_LO_REGS,
+  FP_REGS,
+  ALL_REGS,
+  LIM_REG_CLASSES              /* Last */
+};
+
+#define N_REG_CLASSES  ((int) LIM_REG_CLASSES)
+
+#define REG_CLASS_NAMES                                \
+{                                              \
+  "NO_REGS",                                   \
+  "CORE_REGS",                                 \
+  "GENERAL_REGS",                              \
+  "STACK_REG",                                 \
+  "POINTER_REGS",                              \
+  "FP_LO_REGS",                                        \
+  "FP_REGS",                                   \
+  "ALL_REGS"                                   \
+}
+
+#define REG_CLASS_CONTENTS                                             \
+{                                                                      \
+  { 0x00000000, 0x00000000, 0x00000000 },      /* NO_REGS */           \
+  { 0x7fffffff, 0x00000000, 0x00000003 },      /* CORE_REGS */         \
+  { 0x7fffffff, 0x00000000, 0x00000003 },      /* GENERAL_REGS */      \
+  { 0x80000000, 0x00000000, 0x00000000 },      /* STACK_REG */         \
+  { 0xffffffff, 0x00000000, 0x00000003 },      /* POINTER_REGS */      \
+  { 0x00000000, 0x0000ffff, 0x00000000 },       /* FP_LO_REGS  */      \
+  { 0x00000000, 0xffffffff, 0x00000000 },       /* FP_REGS  */         \
+  { 0xffffffff, 0xffffffff, 0x00000007 }       /* ALL_REGS */          \
+}
+
+#define REGNO_REG_CLASS(REGNO) aarch64_regno_regclass (REGNO)
+
+#define INDEX_REG_CLASS        CORE_REGS
+#define BASE_REG_CLASS  POINTER_REGS
+
+/* Register pairs used to eliminate unneeded registers that point into
+   the stack frame.  */
+#define ELIMINABLE_REGS                                                        \
+{                                                                      \
+  { ARG_POINTER_REGNUM,                STACK_POINTER_REGNUM            },      \
+  { ARG_POINTER_REGNUM,                HARD_FRAME_POINTER_REGNUM       },      \
+  { FRAME_POINTER_REGNUM,      STACK_POINTER_REGNUM            },      \
+  { FRAME_POINTER_REGNUM,      HARD_FRAME_POINTER_REGNUM       },      \
+}
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+  (OFFSET) = aarch64_initial_elimination_offset (FROM, TO)
+
+/* CPU/ARCH option handling.  */
+#include "config/aarch64/aarch64-opts.h"
+
+enum target_cpus
+{
+#define AARCH64_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
+  TARGET_CPU_##IDENT,
+#include "aarch64-cores.def"
+#undef AARCH64_CORE
+  TARGET_CPU_generic
+};
+
+/* If there is no CPU defined at configure, use "generic" as default.  */
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT \
+  (TARGET_CPU_generic | (AARCH64_CPU_DEFAULT_FLAGS << 6))
+#endif
+
+/* The processor for which instructions should be scheduled.  */
+extern enum aarch64_processor aarch64_tune;
+
+/* RTL generation support.  */
+#define INIT_EXPANDERS aarch64_init_expanders ()
+\f
+
+/* Stack layout; function entry, exit and calling.  */
+#define STACK_GROWS_DOWNWARD   1
+
+#define FRAME_GROWS_DOWNWARD   0
+
+#define STARTING_FRAME_OFFSET  0
+
+#define ACCUMULATE_OUTGOING_ARGS       1
+
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Fix for VFP */
+#define LIBCALL_VALUE(MODE)  \
+  gen_rtx_REG (MODE, FLOAT_MODE_P (MODE) ? V0_REGNUM : R0_REGNUM)
+
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+#define AARCH64_ROUND_UP(X, ALIGNMENT) \
+  (((X) + ((ALIGNMENT) - 1)) & ~((ALIGNMENT) - 1))
+
+#define AARCH64_ROUND_DOWN(X, ALIGNMENT) \
+  ((X) & ~((ALIGNMENT) - 1))
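+
+/* For example, AARCH64_ROUND_UP (13, 8) is 16 and
+   AARCH64_ROUND_DOWN (13, 8) is 8; ALIGNMENT must be a power of two.  */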
+
+#ifdef HOST_WIDE_INT
+struct GTY (()) aarch64_frame
+{
+  HOST_WIDE_INT reg_offset[FIRST_PSEUDO_REGISTER];
+  HOST_WIDE_INT saved_regs_size;
+  /* Padding, if needed, after all the callee-save registers have
+     been saved.  */
+  HOST_WIDE_INT padding0;
+  HOST_WIDE_INT hardfp_offset; /* HARD_FRAME_POINTER_REGNUM */
+  HOST_WIDE_INT fp_lr_offset;  /* Space needed for saving fp and/or lr */
+
+  bool laid_out;
+};
+
+typedef struct GTY (()) machine_function
+{
+  struct aarch64_frame frame;
+
+  /* The number of extra stack bytes taken up by register varargs.
+     This area is allocated by the callee at the very top of the frame.  */
+  HOST_WIDE_INT saved_varargs_size;
+
+} machine_function;
+#endif
+
+
+/* Which ABI to use.  */
+enum arm_abi_type
+{
+  ARM_ABI_AAPCS64
+};
+
+enum arm_pcs
+{
+  ARM_PCS_AAPCS64,             /* Base standard AAPCS for 64 bit.  */
+  ARM_PCS_UNKNOWN
+};
+
+
+extern enum arm_abi_type arm_abi;
+extern enum arm_pcs arm_pcs_variant;
+#ifndef ARM_DEFAULT_ABI
+#define ARM_DEFAULT_ABI ARM_ABI_AAPCS64
+#endif
+
+#ifndef ARM_DEFAULT_PCS
+#define ARM_DEFAULT_PCS ARM_PCS_AAPCS64
+#endif
+
+/* We can't use enum machine_mode inside a generator file because it
+   hasn't been created yet; we shouldn't be using any code that
+   needs the real definition though, so this ought to be safe.  */
+#ifdef GENERATOR_FILE
+#define MACHMODE int
+#else
+#include "insn-modes.h"
+#define MACHMODE enum machine_mode
+#endif
+
+
+/* AAPCS related state tracking.  */
+typedef struct
+{
+  enum arm_pcs pcs_variant;
+  int aapcs_arg_processed;     /* No need to lay out this argument again.  */
+  int aapcs_ncrn;              /* Next Core register number.  */
+  int aapcs_nextncrn;          /* Next next core register number.  */
+  int aapcs_nvrn;              /* Next Vector register number.  */
+  int aapcs_nextnvrn;          /* Next Next Vector register number.  */
+  rtx aapcs_reg;               /* Register assigned to this argument.  This
+                                  is NULL_RTX if this parameter goes on
+                                  the stack.  */
+  MACHMODE aapcs_vfp_rmode;
+  int aapcs_stack_words;       /* If the argument is passed on the stack, this
+                                  is the number of words needed, after rounding
+                                  up.  Only meaningful when
+                                  aapcs_reg == NULL_RTX.  */
+  int aapcs_stack_size;                /* The total size (in 8-byte words) of the
+                                  stack arg area so far.  */
+} CUMULATIVE_ARGS;
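+
+/* For orientation (these are AAPCS64 conventions rather than anything
+   encoded here): the first eight integer/pointer arguments normally go
+   in R0-R7 and the first eight floating-point/vector arguments in
+   V0-V7; aapcs_ncrn and aapcs_nvrn track how many of those registers
+   have been consumed, and whatever is left over lands in the stack
+   argument area accounted for by aapcs_stack_size.  */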
+
+#define FUNCTION_ARG_PADDING(MODE, TYPE) \
+  (aarch64_pad_arg_upward (MODE, TYPE) ? upward : downward)
+
+#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
+  (aarch64_pad_reg_upward (MODE, TYPE, FIRST) ? upward : downward)
+
+#define PAD_VARARGS_DOWN       0
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
+  aarch64_init_cumulative_args (&(CUM), FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS)
+
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+  aarch64_function_arg_regno_p(REGNO)
+\f
+
+/* ISA Features.  */
+
+/* Addressing modes, etc.  */
+#define HAVE_POST_INCREMENT    1
+#define HAVE_PRE_INCREMENT     1
+#define HAVE_POST_DECREMENT    1
+#define HAVE_PRE_DECREMENT     1
+#define HAVE_POST_MODIFY_DISP  1
+#define HAVE_PRE_MODIFY_DISP   1
+
+#define MAX_REGS_PER_ADDRESS   2
+
+#define CONSTANT_ADDRESS_P(X)          aarch64_constant_address_p(X)
+
+/* Try a machine-dependent way of reloading an illegitimate address
+   operand.  If we find one, push the reload and jump to WIN.  This
+   macro is used in only one place: `find_reloads_address' in reload.c.  */
+
+#define LEGITIMIZE_RELOAD_ADDRESS(X, MODE, OPNUM, TYPE, IND_L, WIN)         \
+do {                                                                        \
+  rtx new_x = aarch64_legitimize_reload_address (&(X), MODE, OPNUM, TYPE,    \
+                                                IND_L);                     \
+  if (new_x)                                                                \
+    {                                                                       \
+      X = new_x;                                                            \
+      goto WIN;                                                                     \
+    }                                                                       \
+} while (0)
+
+#define REGNO_OK_FOR_BASE_P(REGNO)     \
+  aarch64_regno_ok_for_base_p (REGNO, true)
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+  aarch64_regno_ok_for_index_p (REGNO, true)
+
+#define LEGITIMATE_PIC_OPERAND_P(X) \
+  aarch64_legitimate_pic_operand_p (X)
+
+#define CASE_VECTOR_MODE Pmode
+
+#define DEFAULT_SIGNED_CHAR 0
+
+/* An integer expression for the size in bits of the largest integer machine
+   mode that should actually be used.  We allow pairs of registers.  */
+#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TImode)
+
+/* Maximum bytes moved by a single instruction (load/store pair).  */
+#define MOVE_MAX (UNITS_PER_WORD * 2)
+
+/* The base cost overhead of a memcpy call, for MOVE_RATIO and friends.  */
+#define AARCH64_CALL_RATIO 8
+
+/* When optimizing for size, give a better estimate of the length of a memcpy
+   call, but use the default otherwise.  However, move_by_pieces_ninsns ()
+   counts memory-to-memory moves, and we will have to generate a load and a
+   store for each, so halve the value to take that into account.  */
+#define MOVE_RATIO(speed) \
+  (((speed) ? 15 : AARCH64_CALL_RATIO) / 2)
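+
+/* With the values above this works out to 15/2 = 7 move-by-pieces
+   moves when optimizing for speed and AARCH64_CALL_RATIO/2 = 4 when
+   optimizing for size.  */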
+
+/* For CLEAR_RATIO, when optimizing for size, give a better estimate
+   of the length of a memset call, but use the default otherwise.  */
+#define CLEAR_RATIO(speed) \
+  ((speed) ? 15 : AARCH64_CALL_RATIO)
+
+/* SET_RATIO is similar to CLEAR_RATIO, but for a non-zero constant, so when
+   optimizing for size adjust the ratio to account for the overhead of loading
+   the constant.  */
+#define SET_RATIO(speed) \
+  ((speed) ? 15 : AARCH64_CALL_RATIO - 2)
+
+/* STORE_BY_PIECES_P can be used when copying a constant string, but
+   in that case each 64-bit chunk takes 5 insns instead of 2 (LDR/STR).
+   For now we always fail this and let the move_by_pieces code copy
+   the string from read-only memory.  */
+#define STORE_BY_PIECES_P(SIZE, ALIGN) 0
+
+/* Disable auto-increment in move_by_pieces et al.  Use of auto-increment is
+   rarely a good idea in straight-line code since it adds an extra address
+   dependency between each instruction.  Better to use incrementing offsets.  */
+#define USE_LOAD_POST_INCREMENT(MODE)   0
+#define USE_LOAD_POST_DECREMENT(MODE)   0
+#define USE_LOAD_PRE_INCREMENT(MODE)    0
+#define USE_LOAD_PRE_DECREMENT(MODE)    0
+#define USE_STORE_POST_INCREMENT(MODE)  0
+#define USE_STORE_POST_DECREMENT(MODE)  0
+#define USE_STORE_PRE_INCREMENT(MODE)   0
+#define USE_STORE_PRE_DECREMENT(MODE)   0
+
+/* ?? #define WORD_REGISTER_OPERATIONS  */
+
+/* Define if loading from memory in MODE, an integral mode narrower than
+   BITS_PER_WORD will either zero-extend or sign-extend.  The value of this
+   macro should be the code that says which one of the two operations is
+   implicitly done, or UNKNOWN if none.  */
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+/* Define this macro to be non-zero if instructions will fail to work
+   if given data not on the nominal alignment.  */
+#define STRICT_ALIGNMENT               TARGET_STRICT_ALIGN
+
+/* Define this macro to be non-zero if accessing less than a word of
+   memory is no faster than accessing a word of memory, i.e., if such
+   accesses require more than one instruction or if there is no
+   difference in cost.
+   Although there's no difference in instruction count or cycles,
+   on AArch64 we don't want to widen a sub-word access to a 64-bit
+   access if we don't have to, for power-saving reasons.  */
+#define SLOW_BYTE_ACCESS               0
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+#define NO_FUNCTION_CSE        1
+
+#define Pmode          DImode
+#define FUNCTION_MODE  Pmode
+
+#define SELECT_CC_MODE(OP, X, Y)       aarch64_select_cc_mode (OP, X, Y)
+
+#define REVERSE_CONDITION(CODE, MODE)          \
+  (((MODE) == CCFPmode || (MODE) == CCFPEmode) \
+   ? reverse_condition_maybe_unordered (CODE)  \
+   : reverse_condition (CODE))
+
+#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
+  ((VALUE) = ((MODE) == SImode ? 32 : 64), 2)
+#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
+  ((VALUE) = ((MODE) == SImode ? 32 : 64), 2)
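+
+/* Both macros report the operand width (32 for SImode, 64 otherwise)
+   as the result for a zero input, which matches the behaviour of the
+   CLZ instruction and of the RBIT+CLZ idiom typically used for CTZ.  */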
+
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM)
+
+#define RETURN_ADDR_RTX aarch64_return_addr
+
+#define TRAMPOLINE_SIZE        aarch64_trampoline_size ()
+
+/* Trampolines contain dwords, so must be dword aligned.  */
+#define TRAMPOLINE_ALIGNMENT 64
+
+/* Put trampolines in the text section so that mapping symbols work
+   correctly.  */
+#define TRAMPOLINE_SECTION text_section
+\f
+/* Costs, etc.  */
+#define MEMORY_MOVE_COST(M, CLASS, IN) \
+  (GET_MODE_SIZE (M) < 8 ? 8 : GET_MODE_SIZE (M))
+
+/* To start with.  */
+#define BRANCH_COST(SPEED_P, PREDICTABLE_P) 2
+\f
+
+/* Assembly output.  */
+
+/* For now we'll make all jump tables pc-relative.  */
+#define CASE_VECTOR_PC_RELATIVE        1
+
+#define CASE_VECTOR_SHORTEN_MODE(min, max, body)       \
+  ((min < -0x1fff0 || max > 0x1fff0) ? SImode          \
+   : (min < -0x1f0 || max > 0x1f0) ? HImode            \
+   : QImode)
+
+/* Jump table alignment is explicit in ASM_OUTPUT_CASE_LABEL.  */
+#define ADDR_VEC_ALIGN(JUMPTABLE) 0
+
+#define PRINT_OPERAND(STREAM, X, CODE) aarch64_print_operand (STREAM, X, CODE)
+
+#define PRINT_OPERAND_ADDRESS(STREAM, X) \
+  aarch64_print_operand_address (STREAM, X)
+
+#define FUNCTION_PROFILER(STREAM, LABELNO) \
+  aarch64_function_profiler (STREAM, LABELNO)
+
+/* For some reason, the Linux headers think they know how to define
+   these macros.  They don't!!!  */
+#undef ASM_APP_ON
+#undef ASM_APP_OFF
+#define ASM_APP_ON     "\t" ASM_COMMENT_START " Start of user assembly\n"
+#define ASM_APP_OFF    "\t" ASM_COMMENT_START " End of user assembly\n"
+
+#define ASM_FPRINTF_EXTENSIONS(FILE, ARGS, P)          \
+  case '@':                                            \
+    fputs (ASM_COMMENT_START, FILE);                   \
+    break;                                             \
+                                                       \
+  case 'r':                                            \
+    fputs (REGISTER_PREFIX, FILE);                     \
+    fputs (reg_names[va_arg (ARGS, int)], FILE);       \
+    break;
+
+#define CONSTANT_POOL_BEFORE_FUNCTION 0
+
+/* This definition should be relocated to aarch64-elf-raw.h.  This macro
+   should be undefined in aarch64-linux.h and a clear_cache pattern
+   implemented to emit either the call to __aarch64_sync_cache_range ()
+   directly or, preferably, the appropriate syscall or cache-clear
+   instructions inline.  */
+#define CLEAR_INSN_CACHE(beg, end)                             \
+  extern void  __aarch64_sync_cache_range (void *, void *);    \
+  __aarch64_sync_cache_range (beg, end)
+
+/* This should be integrated with the equivalent in the 32-bit
+   world.  */
+enum aarch64_builtins
+{
+  AARCH64_BUILTIN_MIN,
+  AARCH64_BUILTIN_THREAD_POINTER,
+  AARCH64_SIMD_BUILTIN_BASE
+};
+
+/* VFP registers may only be accessed in the mode in which they
+   were set.  */
+#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS)      \
+  (GET_MODE_SIZE (FROM) != GET_MODE_SIZE (TO)          \
+   ? reg_classes_intersect_p (FP_REGS, (CLASS))                \
+   : 0)
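+
+/* An illustrative consequence: a 128-bit vector value held in an
+   FP/SIMD register cannot simply be re-read as a 64-bit scalar (the
+   sizes differ), whereas same-size reinterpretations such as
+   V4SI <-> V2DI remain permitted.  */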
+
+
+#define SHIFT_COUNT_TRUNCATED !TARGET_SIMD
+
+/* The callee saves only the lower 64 bits of a 128-bit register.  Tell the
+   compiler that the callee clobbers the top 64 bits when restoring the
+   bottom 64 bits.  */
+#define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE) \
+               (FP_REGNUM_P (REGNO) && GET_MODE_SIZE (MODE) > 8)
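+
+/* Per AAPCS64 the callee-saved SIMD registers V8-V15 preserve only
+   their low 64 bits, so, for example, a V4SImode value live in V8
+   still has to be spilled around a call.  */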
+
+/* Check TLS Descriptors mechanism is selected.  */
+#define TARGET_TLS_DESC (aarch64_tls_dialect == TLS_DESCRIPTORS)
+
+extern enum aarch64_code_model aarch64_cmodel;
+
+/* When using the tiny addressing model, conditional and unconditional branches
+   can span the whole of the available address space (1MB).  */
+#define HAS_LONG_COND_BRANCH                           \
+  (aarch64_cmodel == AARCH64_CMODEL_TINY               \
+   || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)
+
+#define HAS_LONG_UNCOND_BRANCH                         \
+  (aarch64_cmodel == AARCH64_CMODEL_TINY               \
+   || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)
+
+/* Modes valid for AdvSIMD Q registers.  */
+#define AARCH64_VALID_SIMD_QREG_MODE(MODE) \
+  ((MODE) == V4SImode || (MODE) == V8HImode || (MODE) == V16QImode \
+   || (MODE) == V4SFmode || (MODE) == V2DImode || (MODE) == V2DFmode)
+
+#endif /* GCC_AARCH64_H */
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
new file mode 100644 (file)
index 0000000..b803922
--- /dev/null
@@ -0,0 +1,2930 @@
+;; Machine description for AArch64 architecture.
+;; Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Register numbers
+(define_constants
+  [
+    (R0_REGNUM         0)
+    (R1_REGNUM         1)
+    (R2_REGNUM         2)
+    (R3_REGNUM         3)
+    (R4_REGNUM         4)
+    (R5_REGNUM         5)
+    (R6_REGNUM         6)
+    (R7_REGNUM         7)
+    (R8_REGNUM         8)
+    (R9_REGNUM         9)
+    (R10_REGNUM                10)
+    (R11_REGNUM                11)
+    (R12_REGNUM                12)
+    (R13_REGNUM                13)
+    (R14_REGNUM                14)
+    (R15_REGNUM                15)
+    (R16_REGNUM                16)
+    (IP0_REGNUM                16)
+    (R17_REGNUM                17)
+    (IP1_REGNUM                17)
+    (R18_REGNUM                18)
+    (R19_REGNUM                19)
+    (R20_REGNUM                20)
+    (R21_REGNUM                21)
+    (R22_REGNUM                22)
+    (R23_REGNUM                23)
+    (R24_REGNUM                24)
+    (R25_REGNUM                25)
+    (R26_REGNUM                26)
+    (R27_REGNUM                27)
+    (R28_REGNUM                28)
+    (R29_REGNUM                29)
+    (R30_REGNUM                30)
+    (LR_REGNUM         30)
+    (SP_REGNUM         31)
+    (V0_REGNUM         32)
+    (V15_REGNUM                47)
+    (V31_REGNUM                63)
+    (SFP_REGNUM                64)
+    (AP_REGNUM         65)
+    (CC_REGNUM         66)
+  ]
+)
+
+(define_c_enum "unspec" [
+    UNSPEC_CASESI
+    UNSPEC_CLS
+    UNSPEC_FRINTA
+    UNSPEC_FRINTI
+    UNSPEC_FRINTM
+    UNSPEC_FRINTP
+    UNSPEC_FRINTX
+    UNSPEC_FRINTZ
+    UNSPEC_GOTSMALLPIC
+    UNSPEC_GOTSMALLTLS
+    UNSPEC_LD2
+    UNSPEC_LD3
+    UNSPEC_LD4
+    UNSPEC_MB
+    UNSPEC_NOP
+    UNSPEC_PRLG_STK
+    UNSPEC_RBIT
+    UNSPEC_ST2
+    UNSPEC_ST3
+    UNSPEC_ST4
+    UNSPEC_TLS
+    UNSPEC_TLSDESC
+    UNSPEC_VSTRUCTDUMMY
+])
+
+(define_c_enum "unspecv" [
+    UNSPECV_EH_RETURN          ; Represent EH_RETURN
+  ]
+)
+
+;; If further include files are added, the definition of MD_INCLUDES
+;; must be updated.
+
+(include "constraints.md")
+(include "predicates.md")
+(include "iterators.md")
+
+;; -------------------------------------------------------------------
+;; Synchronization Builtins
+;; -------------------------------------------------------------------
+
+;; The following sync_* attributes are applied to synchronization
+;; instruction patterns to control the way in which the
+;; synchronization loop is expanded.
+;; All instruction patterns that call aarch64_output_sync_insn ()
+;; should define these attributes.  Refer to the comment above
+;; aarch64.c:aarch64_output_sync_loop () for more detail on the use of
+;; these attributes.
+
+;; Attribute specifies the operand number which contains the
+;; result of a synchronization operation.  The result is the old value
+;; loaded from SYNC_MEMORY.
+(define_attr "sync_result"          "none,0,1,2,3,4,5" (const_string "none"))
+
+;; Attribute specifies the operand number which contains the memory
+;; address to which the synchronization operation is being applied.
+(define_attr "sync_memory"          "none,0,1,2,3,4,5" (const_string "none"))
+
+;; Attribute specifies the operand number which contains the required
+;; old value expected in the memory location.  This attribute may be
+;; none if no required value test should be performed in the expanded
+;; code.
+(define_attr "sync_required_value"  "none,0,1,2,3,4,5" (const_string "none"))
+
+;; Attribute specifies the operand number of the new value to be stored
+;; into the memory location identified by the sync_memory attribute.
+(define_attr "sync_new_value"       "none,0,1,2,3,4,5" (const_string "none"))
+
+;; Attribute specifies the operand number of a temporary register
+;; which can be clobbered by the synchronization instruction sequence.
+;; The register provided by SYNC_T1 may be the same as SYNC_RESULT, in
+;; which case the result value will be clobbered and not available
+;; after the synchronization loop exits.
+(define_attr "sync_t1"              "none,0,1,2,3,4,5" (const_string "none"))
+
+;; Attribute specifies the operand number of a temporary register
+;; which can be clobbered by the synchronization instruction sequence.
+;; This register is used to collect the result of a store exclusive
+;; instruction.
+(define_attr "sync_t2"              "none,0,1,2,3,4,5" (const_string "none"))
+
+;; Attribute that specifies whether or not the emitted synchronization
+;; loop must contain a release barrier.
+(define_attr "sync_release_barrier" "yes,no"           (const_string "yes"))
+
+;; Attribute that specifies the operation that the synchronization
+;; loop should apply to the old and new values to generate the value
+;; written back to memory.
+(define_attr "sync_op"              "none,add,sub,ior,xor,and,nand"
+                                    (const_string "none"))
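+
+;; As an illustrative sketch (not an actual pattern from sync.md): an
+;; atomic fetch-and-add insn would set sync_result to the operand
+;; number holding the old value, sync_memory to the memory operand,
+;; sync_new_value to the addend, sync_op to "add" and sync_t1/sync_t2
+;; to scratch operands, letting aarch64_output_sync_insn () emit the
+;; load-exclusive/store-exclusive retry loop.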
+
+;; -------------------------------------------------------------------
+;; Instruction types and attributes
+;; -------------------------------------------------------------------
+
+;; Main data types used by the instructions.
+
+(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF"
+  (const_string "unknown"))
+
+(define_attr "mode2" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF"
+  (const_string "unknown"))
+
+; The "v8type" attribute is used to for fine grained classification of
+; AArch64 instructions.  This table briefly explains the meaning of each type.
+
+; adc              add/subtract with carry.
+; adcs             add/subtract with carry (setting condition flags).
+; adr              calculate address.
+; alu              simple alu instruction (no memory or fp regs access).
+; alu_ext          simple alu instruction (sign/zero-extended register).
+; alu_shift        simple alu instruction, with a source operand shifted by a constant.
+; alus             simple alu instruction (setting condition flags).
+; alus_ext         simple alu instruction (sign/zero-extended register, setting condition flags).
+; alus_shift       simple alu instruction, with a source operand shifted by a constant (setting condition flags).
+; bfm              bitfield move operation.
+; branch           branch.
+; call             subroutine call.
+; ccmp             conditional compare.
+; clz              count leading zeros/sign bits.
+; csel             conditional select.
+; dmb              data memory barrier.
+; extend           sign/zero-extend (specialised bitfield move).
+; extr             extract register-sized bitfield encoding.
+; fpsimd_load      load single floating point / simd scalar register from memory.
+; fpsimd_load2     load pair of floating point / simd scalar registers from memory.
+; fpsimd_store     store single floating point / simd scalar register to memory.
+; fpsimd_store2    store pair of floating point / simd scalar registers to memory.
+; fadd             floating point add/sub.
+; fccmp            floating point conditional compare.
+; fcmp             floating point comparison.
+; fconst           floating point load immediate.
+; fcsel            floating point conditional select.
+; fcvt             floating point convert (float to float).
+; fcvtf2i          floating point convert (float to integer).
+; fcvti2f          floating point convert (integer to float).
+; fdiv             floating point division operation.
+; ffarith          floating point abs, neg or cpy.
+; fmadd            floating point multiply-add/sub.
+; fminmax          floating point min/max.
+; fmov             floating point move (float to float).
+; fmovf2i          floating point move (float to integer).
+; fmovi2f          floating point move (integer to float).
+; fmul             floating point multiply.
+; frint            floating point round to integral.
+; fsqrt            floating point square root.
+; load_acq         load-acquire.
+; load1            load single general register from memory.
+; load2            load pair of general registers from memory.
+; logic            logical operation (register).
+; logic_imm        and/or/xor operation (immediate).
+; logic_shift      logical operation with shift.
+; logics           logical operation (register, setting condition flags).
+; logics_imm       and/or/xor operation (immediate, setting condition flags).
+; logics_shift     logical operation with shift (setting condition flags).
+; madd             integer multiply-add/sub.
+; maddl            widening integer multiply-add/sub.
+; misc             miscellaneous - any type that doesn't fit into the rest.
+; move             integer move operation.
+; move2            double integer move operation.
+; movk             move 16-bit immediate with keep.
+; movz             move 16-bit immediate with zero/one.
+; mrs              system/special register move.
+; mulh             64x64 to 128-bit multiply (high part).
+; mull             widening multiply.
+; mult             integer multiply instruction.
+; prefetch         memory prefetch.
+; rbit             reverse bits.
+; rev              reverse bytes.
+; sdiv             integer division operation (signed).
+; shift            variable shift operation.
+; shift_imm        immediate shift operation (specialised bitfield move).
+; store_rel        store-release.
+; store1           store single general register to memory.
+; store2           store pair of general registers to memory.
+; udiv             integer division operation (unsigned).
+
+(define_attr "v8type"
+   "adc,\
+   adcs,\
+   adr,\
+   alu,\
+   alu_ext,\
+   alu_shift,\
+   alus,\
+   alus_ext,\
+   alus_shift,\
+   bfm,\
+   branch,\
+   call,\
+   ccmp,\
+   clz,\
+   csel,\
+   dmb,\
+   div,\
+   div64,\
+   extend,\
+   extr,\
+   fpsimd_load,\
+   fpsimd_load2,\
+   fpsimd_store2,\
+   fpsimd_store,\
+   fadd,\
+   fccmp,\
+   fcvt,\
+   fcvtf2i,\
+   fcvti2f,\
+   fcmp,\
+   fconst,\
+   fcsel,\
+   fdiv,\
+   ffarith,\
+   fmadd,\
+   fminmax,\
+   fmov,\
+   fmovf2i,\
+   fmovi2f,\
+   fmul,\
+   frint,\
+   fsqrt,\
+   load_acq,\
+   load1,\
+   load2,\
+   logic,\
+   logic_imm,\
+   logic_shift,\
+   logics,\
+   logics_imm,\
+   logics_shift,\
+   madd,\
+   maddl,\
+   misc,\
+   move,\
+   move2,\
+   movk,\
+   movz,\
+   mrs,\
+   mulh,\
+   mull,\
+   mult,\
+   prefetch,\
+   rbit,\
+   rev,\
+   sdiv,\
+   shift,\
+   shift_imm,\
+   store_rel,\
+   store1,\
+   store2,\
+   udiv"
+  (const_string "alu"))
+
+
+; The "type" attribute is used by the AArch32 backend.  Below is a mapping
+; from "v8type" to "type".
+
+(define_attr "type"
+  "alu,alu_shift,block,branch,call,f_2_r,f_cvt,f_flag,f_loads,
+   f_loadd,f_stored,f_stores,faddd,fadds,fcmpd,fcmps,fconstd,fconsts,
+   fcpys,fdivd,fdivs,ffarithd,ffariths,fmacd,fmacs,fmuld,fmuls,load_byte,
+   load1,load2,mult,r_2_f,store1,store2"
+  (cond [
+         (eq_attr "v8type" "alu_shift,alus_shift,logic_shift,logics_shift") (const_string "alu_shift")
+         (eq_attr "v8type" "branch") (const_string "branch")
+         (eq_attr "v8type" "call") (const_string "call")
+         (eq_attr "v8type" "fmovf2i") (const_string "f_2_r")
+         (eq_attr "v8type" "fcvt,fcvtf2i,fcvti2f") (const_string "f_cvt")
+         (and (eq_attr "v8type" "fpsimd_load") (eq_attr "mode" "SF")) (const_string "f_loads")
+         (and (eq_attr "v8type" "fpsimd_load") (eq_attr "mode" "DF")) (const_string "f_loadd")
+         (and (eq_attr "v8type" "fpsimd_store") (eq_attr "mode" "SF")) (const_string "f_stores")
+         (and (eq_attr "v8type" "fpsimd_store") (eq_attr "mode" "DF")) (const_string "f_stored")
+         (and (eq_attr "v8type" "fadd,fminmax") (eq_attr "mode" "DF")) (const_string "faddd")
+         (and (eq_attr "v8type" "fadd,fminmax") (eq_attr "mode" "SF")) (const_string "fadds")
+         (and (eq_attr "v8type" "fcmp,fccmp") (eq_attr "mode" "DF")) (const_string "fcmpd")
+         (and (eq_attr "v8type" "fcmp,fccmp") (eq_attr "mode" "SF")) (const_string "fcmps")
+         (and (eq_attr "v8type" "fconst") (eq_attr "mode" "DF")) (const_string "fconstd")
+         (and (eq_attr "v8type" "fconst") (eq_attr "mode" "SF")) (const_string "fconsts")
+         (and (eq_attr "v8type" "fdiv,fsqrt") (eq_attr "mode" "DF")) (const_string "fdivd")
+         (and (eq_attr "v8type" "fdiv,fsqrt") (eq_attr "mode" "SF")) (const_string "fdivs")
+         (and (eq_attr "v8type" "ffarith") (eq_attr "mode" "DF")) (const_string "ffarithd")
+         (and (eq_attr "v8type" "ffarith") (eq_attr "mode" "SF")) (const_string "ffariths")
+         (and (eq_attr "v8type" "fmadd") (eq_attr "mode" "DF")) (const_string "fmacd")
+         (and (eq_attr "v8type" "fmadd") (eq_attr "mode" "SF")) (const_string "fmacs")
+         (and (eq_attr "v8type" "fmul") (eq_attr "mode" "DF")) (const_string "fmuld")
+         (and (eq_attr "v8type" "fmul") (eq_attr "mode" "SF")) (const_string "fmuls")
+         (and (eq_attr "v8type" "load1") (eq_attr "mode" "QI,HI")) (const_string "load_byte")
+         (and (eq_attr "v8type" "load1") (eq_attr "mode" "SI,DI,TI")) (const_string "load1")
+         (eq_attr "v8type" "load2") (const_string "load2")
+         (and (eq_attr "v8type" "mulh,mult,mull,madd,sdiv,udiv") (eq_attr "mode" "SI")) (const_string "mult")
+         (eq_attr "v8type" "fmovi2f") (const_string "r_2_f")
+         (eq_attr "v8type" "store1") (const_string "store1")
+         (eq_attr "v8type" "store2") (const_string "store2")
+  ]
+  (const_string "alu")))
+
+;; Attribute that specifies whether or not the instruction touches fp
+;; registers.
+(define_attr "fp" "no,yes" (const_string "no"))
+
+;; Attribute that specifies whether or not the instruction touches simd
+;; registers.
+(define_attr "simd" "no,yes" (const_string "no"))
+
+(define_attr "length" ""
+  (cond [(not (eq_attr "sync_memory" "none"))
+          (symbol_ref "aarch64_sync_loop_insns (insn, operands) * 4")
+       ] (const_int 4)))
+
+;; Attribute that controls whether an alternative is enabled or not.
+;; Currently it is only used to disable alternatives which touch fp or simd
+;; registers when -mgeneral-regs-only is specified.
+(define_attr "enabled" "no,yes"
+  (cond [(ior
+       (and (eq_attr "fp" "yes")
+            (eq (symbol_ref "TARGET_FLOAT") (const_int 0)))
+       (and (eq_attr "simd" "yes")
+            (eq (symbol_ref "TARGET_SIMD") (const_int 0))))
+            (const_string "no")
+       ] (const_string "yes")))
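+
+;; For example, the fmov alternatives of "*movsi_aarch64" below carry
+;; fp "yes" and are therefore switched off when TARGET_FLOAT is
+;; disabled by -mgeneral-regs-only.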
+
+;; -------------------------------------------------------------------
+;; Pipeline descriptions and scheduling
+;; -------------------------------------------------------------------
+
+;; Processor types.
+(include "aarch64-tune.md")
+
+;; Scheduling
+(include "aarch64-generic.md")
+(include "large.md")
+(include "small.md")
+
+;; -------------------------------------------------------------------
+;; Jumps and other miscellaneous insns
+;; -------------------------------------------------------------------
+
+(define_insn "indirect_jump"
+  [(set (pc) (match_operand:DI 0 "register_operand" "r"))]
+  ""
+  "br\\t%0"
+  [(set_attr "v8type" "branch")]
+)
+
+(define_insn "jump"
+  [(set (pc) (label_ref (match_operand 0 "" "")))]
+  ""
+  "b\\t%l0"
+  [(set_attr "v8type" "branch")]
+)
+
+(define_expand "cbranch<mode>4"
+  [(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator"
+                           [(match_operand:GPI 1 "register_operand" "")
+                            (match_operand:GPI 2 "aarch64_plus_operand" "")])
+                          (label_ref (match_operand 3 "" ""))
+                          (pc)))]
+  ""
+  "
+  operands[1] = aarch64_gen_compare_reg (GET_CODE (operands[0]), operands[1],
+                                        operands[2]);
+  operands[2] = const0_rtx;
+  "
+)
+
+(define_expand "cbranch<mode>4"
+  [(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator"
+                           [(match_operand:GPF 1 "register_operand" "")
+                            (match_operand:GPF 2 "aarch64_reg_or_zero" "")])
+                          (label_ref (match_operand 3 "" ""))
+                          (pc)))]
+  ""
+  "
+  operands[1] = aarch64_gen_compare_reg (GET_CODE (operands[0]), operands[1],
+                                        operands[2]);
+  operands[2] = const0_rtx;
+  "
+)
+
+(define_insn "*condjump"
+  [(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator"
+                           [(match_operand 1 "cc_register" "") (const_int 0)])
+                          (label_ref (match_operand 2 "" ""))
+                          (pc)))]
+  ""
+  "b%m0\\t%l2"
+  [(set_attr "v8type" "branch")]
+)
+
+(define_expand "casesi"
+  [(match_operand:SI 0 "register_operand" "")  ; Index
+   (match_operand:SI 1 "const_int_operand" "") ; Lower bound
+   (match_operand:SI 2 "const_int_operand" "") ; Total range
+   (match_operand:DI 3 "" "")                  ; Table label
+   (match_operand:DI 4 "" "")]                 ; Out of range label
+  ""
+  {
+    if (operands[1] != const0_rtx)
+      {
+       rtx reg = gen_reg_rtx (SImode);
+
+       /* Canonical RTL says that if you have:
+
+          (minus (X) (CONST))
+
+           then this should be emitted as:
+
+           (plus (X) (-CONST))
+
+          The use of trunc_int_for_mode ensures that the resulting
+          constant can be represented in SImode; this is important
+          for the corner case where operands[1] is INT_MIN.  */
+
+       operands[1] = GEN_INT (trunc_int_for_mode (-INTVAL (operands[1]), SImode));
+
+       if (!(*insn_data[CODE_FOR_addsi3].operand[2].predicate)
+             (operands[1], SImode))
+         operands[1] = force_reg (SImode, operands[1]);
+       emit_insn (gen_addsi3 (reg, operands[0], operands[1]));
+       operands[0] = reg;
+      }
+
+    if (!aarch64_plus_operand (operands[2], SImode))
+      operands[2] = force_reg (SImode, operands[2]);
+    emit_jump_insn (gen_cbranchsi4 (gen_rtx_GTU (SImode, const0_rtx,
+                                                const0_rtx),
+                                   operands[0], operands[2], operands[4]));
+
+    operands[2] = force_reg (DImode, gen_rtx_LABEL_REF (VOIDmode, operands[3]));
+    emit_jump_insn (gen_casesi_dispatch (operands[2], operands[0],
+                                        operands[3]));
+    DONE;
+  }
+)
+
+(define_insn "casesi_dispatch"
+  [(parallel
+    [(set (pc)
+         (mem:DI (unspec [(match_operand:DI 0 "register_operand" "r")
+                          (match_operand:SI 1 "register_operand" "r")]
+                       UNSPEC_CASESI)))
+     (clobber (reg:CC CC_REGNUM))
+     (clobber (match_scratch:DI 3 "=r"))
+     (clobber (match_scratch:DI 4 "=r"))
+     (use (label_ref (match_operand 2 "" "")))])]
+  ""
+  "*
+  return aarch64_output_casesi (operands);
+  "
+  [(set_attr "length" "16")
+   (set_attr "v8type" "branch")]
+)
+
+(define_insn "nop"
+  [(unspec[(const_int 0)] UNSPEC_NOP)]
+  ""
+  "nop"
+  [(set_attr "v8type" "misc")]
+)
+
+(define_expand "prologue"
+  [(clobber (const_int 0))]
+  ""
+  "
+  aarch64_expand_prologue ();
+  DONE;
+  "
+)
+
+(define_expand "epilogue"
+  [(clobber (const_int 0))]
+  ""
+  "
+  aarch64_expand_epilogue (false);
+  DONE;
+  "
+)
+
+(define_expand "sibcall_epilogue"
+  [(clobber (const_int 0))]
+  ""
+  "
+  aarch64_expand_epilogue (true);
+  DONE;
+  "
+)
+
+(define_insn "*do_return"
+  [(return)]
+  ""
+  "ret"
+  [(set_attr "v8type" "branch")]
+)
+
+(define_insn "eh_return"
+  [(unspec_volatile [(match_operand:DI 0 "register_operand" "r")]
+    UNSPECV_EH_RETURN)]
+  ""
+  "#"
+  [(set_attr "v8type" "branch")]
+)
+
+(define_split
+  [(unspec_volatile [(match_operand:DI 0 "register_operand" "")]
+    UNSPECV_EH_RETURN)]
+  "reload_completed"
+  [(set (match_dup 1) (match_dup 0))]
+  {
+    operands[1] = aarch64_final_eh_return_addr ();
+  }
+)
+
+(define_insn "*cb<optab><mode>1"
+  [(set (pc) (if_then_else (EQL (match_operand:GPI 0 "register_operand" "r")
+                               (const_int 0))
+                          (label_ref (match_operand 1 "" ""))
+                          (pc)))]
+  ""
+  "<cbz>\\t%<w>0, %l1"
+  [(set_attr "v8type" "branch")]
+)
+
+(define_insn "*tb<optab><mode>1"
+  [(set (pc) (if_then_else
+             (EQL (zero_extract:DI (match_operand:GPI 0 "register_operand" "r")
+                                   (const_int 1)
+                                   (match_operand 1 "const_int_operand" "n"))
+                  (const_int 0))
+            (label_ref (match_operand 2 "" ""))
+            (pc)))
+   (clobber (match_scratch:DI 3 "=r"))]
+  ""
+  "*
+  if (get_attr_length (insn) == 8)
+    return \"ubfx\\t%<w>3, %<w>0, %1, #1\;<cbz>\\t%<w>3, %l2\";
+  return \"<tbz>\\t%<w>0, %1, %l2\";
+  "
+  [(set_attr "v8type" "branch")
+   (set_attr "mode" "<MODE>")
+   (set (attr "length")
+       (if_then_else (and (ge (minus (match_dup 2) (pc)) (const_int -32768))
+                          (lt (minus (match_dup 2) (pc)) (const_int 32764)))
+                     (const_int 4)
+                     (const_int 8)))]
+)
+
+(define_insn "*cb<optab><mode>1"
+  [(set (pc) (if_then_else (LTGE (match_operand:ALLI 0 "register_operand" "r")
+                                (const_int 0))
+                          (label_ref (match_operand 1 "" ""))
+                          (pc)))
+   (clobber (match_scratch:DI 2 "=r"))]
+  ""
+  "*
+  if (get_attr_length (insn) == 8)
+    return \"ubfx\\t%<w>2, %<w>0, <sizem1>, #1\;<cbz>\\t%<w>2, %l1\";
+  return \"<tbz>\\t%<w>0, <sizem1>, %l1\";
+  "
+  [(set_attr "v8type" "branch")
+   (set_attr "mode" "<MODE>")
+   (set (attr "length")
+       (if_then_else (and (ge (minus (match_dup 1) (pc)) (const_int -32768))
+                          (lt (minus (match_dup 1) (pc)) (const_int 32764)))
+                     (const_int 4)
+                     (const_int 8)))]
+)
+
+;; -------------------------------------------------------------------
+;; Subroutine calls and sibcalls
+;; -------------------------------------------------------------------
+
+(define_expand "call"
+  [(parallel [(call (match_operand 0 "memory_operand" "")
+                   (match_operand 1 "general_operand" ""))
+             (use (match_operand 2 "" ""))
+             (clobber (reg:DI LR_REGNUM))])]
+  ""
+  "
+  {
+    rtx callee;
+
+    /* In an untyped call, we can get NULL for operand 2.  */
+    if (operands[2] == NULL)
+      operands[2] = const0_rtx;
+
+    /* Decide if we should generate indirect calls by loading the
+       64-bit address of the callee into a register before performing
+       the branch-and-link.  */
+    callee = XEXP (operands[0], 0);
+    if (GET_CODE (callee) == SYMBOL_REF
+       ? aarch64_is_long_call_p (callee)
+       : !REG_P (callee))
+      XEXP (operands[0], 0) = force_reg (Pmode, callee);
+  }"
+)
+
+(define_insn "*call_reg"
+  [(call (mem:DI (match_operand:DI 0 "register_operand" "r"))
+        (match_operand 1 "" ""))
+   (use (match_operand 2 "" ""))
+   (clobber (reg:DI LR_REGNUM))]
+  ""
+  "blr\\t%0"
+  [(set_attr "v8type" "call")]
+)
+
+(define_insn "*call_symbol"
+  [(call (mem:DI (match_operand:DI 0 "" ""))
+        (match_operand 1 "" ""))
+   (use (match_operand 2 "" ""))
+   (clobber (reg:DI LR_REGNUM))]
+  "GET_CODE (operands[0]) == SYMBOL_REF
+   && !aarch64_is_long_call_p (operands[0])"
+  "bl\\t%a0"
+  [(set_attr "v8type" "call")]
+)
+
+(define_expand "call_value"
+  [(parallel [(set (match_operand 0 "" "")
+                  (call (match_operand 1 "memory_operand" "")
+                        (match_operand 2 "general_operand" "")))
+             (use (match_operand 3 "" ""))
+             (clobber (reg:DI LR_REGNUM))])]
+  ""
+  "
+  {
+    rtx callee;
+
+    /* In an untyped call, we can get NULL for operand 3.  */
+    if (operands[3] == NULL)
+      operands[3] = const0_rtx;
+
+    /* Decide if we should generate indirect calls by loading the
+       64-bit address of the callee into a register before performing
+       the branch-and-link.  */
+    callee = XEXP (operands[1], 0);
+    if (GET_CODE (callee) == SYMBOL_REF
+       ? aarch64_is_long_call_p (callee)
+       : !REG_P (callee))
+      XEXP (operands[1], 0) = force_reg (Pmode, callee);
+  }"
+)
+
+(define_insn "*call_value_reg"
+  [(set (match_operand 0 "" "")
+       (call (mem:DI (match_operand:DI 1 "register_operand" "r"))
+                     (match_operand 2 "" "")))
+   (use (match_operand 3 "" ""))
+   (clobber (reg:DI LR_REGNUM))]
+  ""
+  "blr\\t%1"
+  [(set_attr "v8type" "call")]
+)
+
+(define_insn "*call_value_symbol"
+  [(set (match_operand 0 "" "")
+       (call (mem:DI (match_operand:DI 1 "" ""))
+             (match_operand 2 "" "")))
+   (use (match_operand 3 "" ""))
+   (clobber (reg:DI LR_REGNUM))]
+  "GET_CODE (operands[1]) == SYMBOL_REF
+   && !aarch64_is_long_call_p (operands[1])"
+  "bl\\t%a1"
+  [(set_attr "v8type" "call")]
+)
+
+(define_expand "sibcall"
+  [(parallel [(call (match_operand 0 "memory_operand" "")
+                   (match_operand 1 "general_operand" ""))
+             (return)
+             (use (match_operand 2 "" ""))])]
+  ""
+  {
+    if (operands[2] == NULL_RTX)
+      operands[2] = const0_rtx;
+  }
+)
+
+(define_expand "sibcall_value"
+  [(parallel [(set (match_operand 0 "" "")
+                  (call (match_operand 1 "memory_operand" "")
+                        (match_operand 2 "general_operand" "")))
+             (return)
+             (use (match_operand 3 "" ""))])]
+  ""
+  {
+    if (operands[3] == NULL_RTX)
+      operands[3] = const0_rtx;
+  }
+)
+
+(define_insn "*sibcall_insn"
+  [(call (mem:DI (match_operand:DI 0 "" "X"))
+        (match_operand 1 "" ""))
+   (return)
+   (use (match_operand 2 "" ""))]
+  "GET_CODE (operands[0]) == SYMBOL_REF"
+  "b\\t%a0"
+  [(set_attr "v8type" "branch")]
+)
+
+(define_insn "*sibcall_value_insn"
+  [(set (match_operand 0 "" "")
+       (call (mem:DI (match_operand 1 "" "X"))
+             (match_operand 2 "" "")))
+   (return)
+   (use (match_operand 3 "" ""))]
+  "GET_CODE (operands[1]) == SYMBOL_REF"
+  "b\\t%a1"
+  [(set_attr "v8type" "branch")]
+)
+
+;; Call subroutine returning any type.
+
+(define_expand "untyped_call"
+  [(parallel [(call (match_operand 0 "")
+                   (const_int 0))
+             (match_operand 1 "")
+             (match_operand 2 "")])]
+  ""
+{
+  int i;
+
+  emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
+
+  for (i = 0; i < XVECLEN (operands[2], 0); i++)
+    {
+      rtx set = XVECEXP (operands[2], 0, i);
+      emit_move_insn (SET_DEST (set), SET_SRC (set));
+    }
+
+  /* The optimizer does not know that the call sets the function value
+     registers we stored in the result block.  We avoid problems by
+     claiming that all hard registers are used and clobbered at this
+     point.  */
+  emit_insn (gen_blockage ());
+  DONE;
+})
+
+;; -------------------------------------------------------------------
+;; Moves
+;; -------------------------------------------------------------------
+
+(define_expand "mov<mode>"
+  [(set (match_operand:SHORT 0 "nonimmediate_operand" "")
+       (match_operand:SHORT 1 "general_operand" ""))]
+  ""
+  "
+    if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx)
+      operands[1] = force_reg (<MODE>mode, operands[1]);
+  "
+)
+
+(define_insn "*mov<mode>_aarch64"
+  [(set (match_operand:SHORT 0 "nonimmediate_operand" "=r,r,r,m,  r,*w")
+        (match_operand:SHORT 1 "general_operand"      " r,M,m,rZ,*w,r"))]
+  "(register_operand (operands[0], <MODE>mode)
+    || aarch64_reg_or_zero (operands[1], <MODE>mode))"
+  "@
+   mov\\t%w0, %w1
+   mov\\t%w0, %1
+   ldr<size>\\t%w0, %1
+   str<size>\\t%w1, %0
+   umov\\t%w0, %1.<v>[0]
+   dup\\t%0.<Vallxd>, %w1"
+  [(set_attr "v8type" "move,alu,load1,store1,*,*")
+   (set_attr "simd_type" "*,*,*,*,simd_movgp,simd_dupgp")
+   (set_attr "mode" "<MODE>")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "mov<mode>"
+  [(set (match_operand:GPI 0 "nonimmediate_operand" "")
+       (match_operand:GPI 1 "general_operand" ""))]
+  ""
+  "
+    if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx)
+      operands[1] = force_reg (<MODE>mode, operands[1]);
+
+    if (CONSTANT_P (operands[1]))
+      {
+       aarch64_expand_mov_immediate (operands[0], operands[1]);
+       DONE;
+      }
+  "
+)
+
+(define_insn "*movsi_aarch64"
+  [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,m, *w, r,*w")
+       (match_operand:SI 1 "aarch64_mov_operand"     " r,M,m,rZ,rZ,*w,*w"))]
+  "(register_operand (operands[0], SImode)
+    || aarch64_reg_or_zero (operands[1], SImode))"
+  "@
+   mov\\t%w0, %w1
+   mov\\t%w0, %1
+   ldr\\t%w0, %1
+   str\\t%w1, %0
+   fmov\\t%s0, %w1
+   fmov\\t%w0, %s1
+   fmov\\t%s0, %s1"
+  [(set_attr "v8type" "move,alu,load1,store1,fmov,fmov,fmov")
+   (set_attr "mode" "SI")
+   (set_attr "fp" "*,*,*,*,yes,yes,yes")]
+)
+
+(define_insn "*movdi_aarch64"
+  [(set (match_operand:DI 0 "nonimmediate_operand" "=r,k,r,r,r,m, r,  r,  *w, r,*w,w")
+       (match_operand:DI 1 "aarch64_mov_operand"  " r,r,k,N,m,rZ,Usa,Ush,rZ,*w,*w,Dd"))]
+  "(register_operand (operands[0], DImode)
+    || aarch64_reg_or_zero (operands[1], DImode))"
+  "@
+   mov\\t%x0, %x1
+   mov\\t%0, %x1
+   mov\\t%x0, %1
+   mov\\t%x0, %1
+   ldr\\t%x0, %1
+   str\\t%x1, %0
+   adr\\t%x0, %a1
+   adrp\\t%x0, %A1
+   fmov\\t%d0, %x1
+   fmov\\t%x0, %d1
+   fmov\\t%d0, %d1
+   movi\\t%d0, %1"
+  [(set_attr "v8type" "move,move,move,alu,load1,store1,adr,adr,fmov,fmov,fmov,fmov")
+   (set_attr "mode" "DI")
+   (set_attr "fp" "*,*,*,*,*,*,*,*,yes,yes,yes,yes")]
+)
+
+(define_insn "insv_imm<mode>"
+  [(set (zero_extract:GPI (match_operand:GPI 0 "register_operand" "+r")
+                         (const_int 16)
+                         (match_operand 1 "const_int_operand" "n"))
+       (match_operand 2 "const_int_operand" "n"))]
+  "INTVAL (operands[1]) < GET_MODE_BITSIZE (<MODE>mode)
+   && INTVAL (operands[1]) % 16 == 0
+   && INTVAL (operands[2]) <= 0xffff"
+  "movk\\t%<w>0, %2, lsl %1"
+  [(set_attr "v8type" "movk")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_expand "movti"
+  [(set (match_operand:TI 0 "nonimmediate_operand" "")
+       (match_operand:TI 1 "general_operand" ""))]
+  ""
+  "
+    if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx)
+      operands[1] = force_reg (TImode, operands[1]);
+  "
+)
+
+(define_insn "*movti_aarch64"
+  [(set (match_operand:TI 0
+        "nonimmediate_operand"  "=r, *w,r ,*w,r  ,Ump,Ump,*w,m")
+       (match_operand:TI 1
+        "aarch64_movti_operand" " rn,r ,*w,*w,Ump,r  ,Z  , m,*w"))]
+  "(register_operand (operands[0], TImode)
+    || aarch64_reg_or_zero (operands[1], TImode))"
+  "@
+   #
+   #
+   #
+   orr\\t%0.16b, %1.16b, %1.16b
+   ldp\\t%0, %H0, %1
+   stp\\t%1, %H1, %0
+   stp\\txzr, xzr, %0
+   ldr\\t%q0, %1
+   str\\t%q1, %0"
+  [(set_attr "v8type" "move2,fmovi2f,fmovf2i,*, \
+                      load2,store2,store2,fpsimd_load,fpsimd_store")
+   (set_attr "simd_type" "*,*,*,simd_move,*,*,*,*,*")
+   (set_attr "mode" "DI,DI,DI,TI,DI,DI,DI,TI,TI")
+   (set_attr "length" "8,8,8,4,4,4,4,4,4")
+   (set_attr "fp" "*,*,*,*,*,*,*,yes,yes")
+   (set_attr "simd" "*,*,*,yes,*,*,*,*,*")])
+
+;; Split a TImode register-register or register-immediate move into
+;; its component DImode pieces, taking care to handle overlapping
+;; source and dest registers.
+(define_split
+   [(set (match_operand:TI 0 "register_operand" "")
+        (match_operand:TI 1 "aarch64_reg_or_imm" ""))]
+  "reload_completed && aarch64_split_128bit_move_p (operands[0], operands[1])"
+  [(const_int 0)]
+{
+  aarch64_split_128bit_move (operands[0], operands[1]);
+  DONE;
+})
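+
+;; For instance (a hypothetical register assignment), copying x0:x1
+;; into x1:x2 has to move the high half first (x2 <- x1, then
+;; x1 <- x0) or the overlapping register would be clobbered;
+;; aarch64_split_128bit_move is expected to order the two DImode
+;; moves accordingly.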
+
+(define_expand "mov<mode>"
+  [(set (match_operand:GPF 0 "nonimmediate_operand" "")
+       (match_operand:GPF 1 "general_operand" ""))]
+  ""
+  "
+    if (!TARGET_FLOAT)
+     {
+       sorry (\"%qs and floating point code\", \"-mgeneral-regs-only\");
+       FAIL;
+     }
+
+    if (GET_CODE (operands[0]) == MEM)
+      operands[1] = force_reg (<MODE>mode, operands[1]);
+  "
+)
+
+(define_insn "*movsf_aarch64"
+  [(set (match_operand:SF 0 "nonimmediate_operand" "= w,?r,w,w,m,r,m ,r")
+       (match_operand:SF 1 "general_operand"      "?rY, w,w,m,w,m,rY,r"))]
+  "TARGET_FLOAT && (register_operand (operands[0], SFmode)
+    || register_operand (operands[1], SFmode))"
+  "@
+   fmov\\t%s0, %w1
+   fmov\\t%w0, %s1
+   fmov\\t%s0, %s1
+   ldr\\t%s0, %1
+   str\\t%s1, %0
+   ldr\\t%w0, %1
+   str\\t%w1, %0
+   mov\\t%w0, %w1"
+  [(set_attr "v8type" "fmovi2f,fmovf2i,fmov,fpsimd_load,fpsimd_store,fpsimd_load,fpsimd_store,fmov")
+   (set_attr "mode" "SF")]
+)
+
+(define_insn "*movdf_aarch64"
+  [(set (match_operand:DF 0 "nonimmediate_operand" "= w,?r,w,w,m,r,m ,r")
+       (match_operand:DF 1 "general_operand"      "?rY, w,w,m,w,m,rY,r"))]
+  "TARGET_FLOAT && (register_operand (operands[0], DFmode)
+    || register_operand (operands[1], DFmode))"
+  "@
+   fmov\\t%d0, %x1
+   fmov\\t%x0, %d1
+   fmov\\t%d0, %d1
+   ldr\\t%d0, %1
+   str\\t%d1, %0
+   ldr\\t%x0, %1
+   str\\t%x1, %0
+   mov\\t%x0, %x1"
+  [(set_attr "v8type" "fmovi2f,fmovf2i,fmov,fpsimd_load,fpsimd_store,fpsimd_load,fpsimd_store,move")
+   (set_attr "mode" "DF")]
+)
+
+(define_expand "movtf"
+  [(set (match_operand:TF 0 "nonimmediate_operand" "")
+       (match_operand:TF 1 "general_operand" ""))]
+  ""
+  "
+    if (!TARGET_FLOAT)
+     {
+       sorry (\"%qs and floating point code\", \"-mgeneral-regs-only\");
+       FAIL;
+     }
+
+    if (GET_CODE (operands[0]) == MEM)
+      operands[1] = force_reg (TFmode, operands[1]);
+  "
+)
+
+(define_insn "*movtf_aarch64"
+  [(set (match_operand:TF 0
+        "nonimmediate_operand" "=w,?&r,w ,?r,w,?w,w,m,?r ,Ump")
+       (match_operand:TF 1
+        "general_operand"      " w,?r, ?r,w ,Y,Y ,m,w,Ump,?rY"))]
+  "TARGET_FLOAT && (register_operand (operands[0], TFmode)
+    || register_operand (operands[1], TFmode))"
+  "@
+   orr\\t%0.16b, %1.16b, %1.16b
+   mov\\t%0, %1\;mov\\t%H0, %H1
+   fmov\\t%d0, %Q1\;fmov\\t%0.d[1], %R1
+   fmov\\t%Q0, %d1\;fmov\\t%R0, %1.d[1]
+   movi\\t%0.2d, #0
+   fmov\\t%s0, wzr
+   ldr\\t%q0, %1
+   str\\t%q1, %0
+   ldp\\t%0, %H0, %1
+   stp\\t%1, %H1, %0"
+  [(set_attr "v8type" "logic,move2,fmovi2f,fmovf2i,fconst,fconst,fpsimd_load,fpsimd_store,fpsimd_load2,fpsimd_store2")
+   (set_attr "mode" "DF,DF,DF,DF,DF,DF,TF,TF,DF,DF")
+   (set_attr "length" "4,8,8,8,4,4,4,4,4,4")
+   (set_attr "fp" "*,*,yes,yes,*,yes,yes,yes,*,*")
+   (set_attr "simd" "yes,*,*,*,yes,*,*,*,*,*")]
+)
+
+
+;; Operands 1 and 3 are tied together by the final condition; so we allow
+;; fairly lax checking on the second memory operation.
+(define_insn "load_pair<mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (match_operand:GPI 1 "aarch64_mem_pair_operand" "Ump"))
+   (set (match_operand:GPI 2 "register_operand" "=r")
+        (match_operand:GPI 3 "memory_operand" "m"))]
+  "rtx_equal_p (XEXP (operands[3], 0),
+               plus_constant (Pmode,
+                              XEXP (operands[1], 0),
+                              GET_MODE_SIZE (<MODE>mode)))"
+  "ldp\\t%<w>0, %<w>2, %1"
+  [(set_attr "v8type" "load2")
+   (set_attr "mode" "<MODE>")]
+)
+
+;; Operands 0 and 2 are tied together by the final condition; so we allow
+;; fairly lax checking on the second memory operation.
+(define_insn "store_pair<mode>"
+  [(set (match_operand:GPI 0 "aarch64_mem_pair_operand" "=Ump")
+       (match_operand:GPI 1 "register_operand" "r"))
+   (set (match_operand:GPI 2 "memory_operand" "=m")
+        (match_operand:GPI 3 "register_operand" "r"))]
+  "rtx_equal_p (XEXP (operands[2], 0),
+               plus_constant (Pmode,
+                              XEXP (operands[0], 0),
+                              GET_MODE_SIZE (<MODE>mode)))"
+  "stp\\t%<w>1, %<w>3, %0"
+  [(set_attr "v8type" "store2")
+   (set_attr "mode" "<MODE>")]
+)
+
+;; Operands 1 and 3 are tied together by the final condition; so we allow
+;; fairly lax checking on the second memory operation.
+(define_insn "load_pair<mode>"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+       (match_operand:GPF 1 "aarch64_mem_pair_operand" "Ump"))
+   (set (match_operand:GPF 2 "register_operand" "=w")
+        (match_operand:GPF 3 "memory_operand" "m"))]
+  "rtx_equal_p (XEXP (operands[3], 0),
+               plus_constant (Pmode,
+                              XEXP (operands[1], 0),
+                              GET_MODE_SIZE (<MODE>mode)))"
+  "ldp\\t%<w>0, %<w>2, %1"
+  [(set_attr "v8type" "fpsimd_load2")
+   (set_attr "mode" "<MODE>")]
+)
+
+;; Operands 0 and 2 are tied together by the final condition; so we allow
+;; fairly lax checking on the second memory operation.
+(define_insn "store_pair<mode>"
+  [(set (match_operand:GPF 0 "aarch64_mem_pair_operand" "=Ump")
+       (match_operand:GPF 1 "register_operand" "w"))
+   (set (match_operand:GPF 2 "memory_operand" "=m")
+        (match_operand:GPF 3 "register_operand" "w"))]
+  "rtx_equal_p (XEXP (operands[2], 0),
+               plus_constant (Pmode,
+                              XEXP (operands[0], 0),
+                              GET_MODE_SIZE (<MODE>mode)))"
+  "stp\\t%<w>1, %<w>3, %0"
+  [(set_attr "v8type" "fpsimd_load2")
+   (set_attr "mode" "<MODE>")]
+)
+
+;; Load pair with writeback.  This is primarily used in function epilogues
+;; when restoring [fp,lr]
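+;; e.g. restoring the frame record with "ldp x29, x30, [sp], 16"
+;; (illustrative operands; the post-index amount comes from operand 4).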
+(define_insn "loadwb_pair<GPI:mode>_<PTR:mode>"
+  [(parallel
+    [(set (match_operand:PTR 0 "register_operand" "=k")
+          (plus:PTR (match_operand:PTR 1 "register_operand" "0")
+                  (match_operand:PTR 4 "const_int_operand" "n")))
+     (set (match_operand:GPI 2 "register_operand" "=r")
+          (mem:GPI (plus:PTR (match_dup 1)
+                   (match_dup 4))))
+     (set (match_operand:GPI 3 "register_operand" "=r")
+          (mem:GPI (plus:PTR (match_dup 1)
+                   (match_operand:PTR 5 "const_int_operand" "n"))))])]
+  "INTVAL (operands[5]) == INTVAL (operands[4]) + GET_MODE_SIZE (<GPI:MODE>mode)"
+  "ldp\\t%<w>2, %<w>3, [%1], %4"
+  [(set_attr "v8type" "load2")
+   (set_attr "mode" "<GPI:MODE>")]
+)
+
+;; Store pair with writeback.  This is primarily used in function prologues
+;; when saving [fp,lr]
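+;; e.g. saving the frame record with "stp x29, x30, [sp, -16]!"
+;; (illustrative operands; the pre-index amount comes from operand 4).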
+(define_insn "storewb_pair<GPI:mode>_<PTR:mode>"
+  [(parallel
+    [(set (match_operand:PTR 0 "register_operand" "=&k")
+          (plus:PTR (match_operand:PTR 1 "register_operand" "0")
+                  (match_operand:PTR 4 "const_int_operand" "n")))
+     (set (mem:GPI (plus:PTR (match_dup 0)
+                   (match_dup 4)))
+          (match_operand:GPI 2 "register_operand" "r"))
+     (set (mem:GPI (plus:PTR (match_dup 0)
+                   (match_operand:PTR 5 "const_int_operand" "n")))
+          (match_operand:GPI 3 "register_operand" "r"))])]
+  "INTVAL (operands[5]) == INTVAL (operands[4]) + GET_MODE_SIZE (<GPI:MODE>mode)"
+  "stp\\t%<w>2, %<w>3, [%0, %4]!"
+  [(set_attr "v8type" "store2")
+   (set_attr "mode" "<GPI:MODE>")]
+)
+
+;; -------------------------------------------------------------------
+;; Sign/Zero extension
+;; -------------------------------------------------------------------
+
+(define_expand "<optab>sidi2"
+  [(set (match_operand:DI 0 "register_operand")
+       (ANY_EXTEND:DI (match_operand:SI 1 "nonimmediate_operand")))]
+  ""
+)
+
+(define_insn "*extendsidi2_aarch64"
+  [(set (match_operand:DI 0 "register_operand" "=r,r")
+        (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
+  ""
+  "@
+   sxtw\t%0, %w1
+   ldrsw\t%0, %1"
+  [(set_attr "v8type" "extend,load1")
+   (set_attr "mode" "DI")]
+)
+
+(define_insn "*zero_extendsidi2_aarch64"
+  [(set (match_operand:DI 0 "register_operand" "=r,r")
+        (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
+  ""
+  "@
+   uxtw\t%0, %w1
+   ldr\t%w0, %1"
+  [(set_attr "v8type" "extend,load1")
+   (set_attr "mode" "DI")]
+)
+
+(define_expand "<ANY_EXTEND:optab><SHORT:mode><GPI:mode>2"
+  [(set (match_operand:GPI 0 "register_operand")
+        (ANY_EXTEND:GPI (match_operand:SHORT 1 "nonimmediate_operand")))]
+  ""
+)
+
+(define_insn "*extend<SHORT:mode><GPI:mode>2_aarch64"
+  [(set (match_operand:GPI 0 "register_operand" "=r,r")
+        (sign_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))]
+  ""
+  "@
+   sxt<SHORT:size>\t%<GPI:w>0, %w1
+   ldrs<SHORT:size>\t%<GPI:w>0, %1"
+  [(set_attr "v8type" "extend,load1")
+   (set_attr "mode" "<GPI:MODE>")]
+)
+
+(define_insn "*zero_extend<SHORT:mode><GPI:mode>2_aarch64"
+  [(set (match_operand:GPI 0 "register_operand" "=r,r")
+        (zero_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))]
+  ""
+  "@
+   uxt<SHORT:size>\t%<GPI:w>0, %w1
+   ldr<SHORT:size>\t%w0, %1"
+  [(set_attr "v8type" "extend,load1")
+   (set_attr "mode" "<GPI:MODE>")]
+)
+
+(define_expand "<optab>qihi2"
+  [(set (match_operand:HI 0 "register_operand")
+        (ANY_EXTEND:HI (match_operand:QI 1 "nonimmediate_operand")))]
+  ""
+)
+
+(define_insn "*<optab>qihi2_aarch64"
+  [(set (match_operand:HI 0 "register_operand" "=r,r")
+        (ANY_EXTEND:HI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+  ""
+  "@
+   <su>xtb\t%w0, %w1
+   <ldrxt>b\t%w0, %1"
+  [(set_attr "v8type" "extend,load1")
+   (set_attr "mode" "HI")]
+)
+
+;; -------------------------------------------------------------------
+;; Simple arithmetic
+;; -------------------------------------------------------------------
+
+(define_expand "add<mode>3"
+  [(set
+    (match_operand:GPI 0 "register_operand" "")
+    (plus:GPI (match_operand:GPI 1 "register_operand" "")
+             (match_operand:GPI 2 "aarch64_pluslong_operand" "")))]
+  ""
+  "
+  if (! aarch64_plus_operand (operands[2], VOIDmode))
+    {
+      rtx subtarget = ((optimize && can_create_pseudo_p ())
+                      ? gen_reg_rtx (<MODE>mode) : operands[0]);
+      HOST_WIDE_INT imm = INTVAL (operands[2]);
+
+      if (imm < 0)
+       imm = -(-imm & ~0xfff);
+      else
+        imm &= ~0xfff;
+
+      emit_insn (gen_add<mode>3 (subtarget, operands[1], GEN_INT (imm)));
+      operands[1] = subtarget;
+      operands[2] = GEN_INT (INTVAL (operands[2]) - imm);
+    }
+  "
+)
+
+(define_insn "*addsi3_aarch64"
+  [(set
+    (match_operand:SI 0 "register_operand" "=rk,rk,rk")
+    (plus:SI
+     (match_operand:SI 1 "register_operand" "%rk,rk,rk")
+     (match_operand:SI 2 "aarch64_plus_operand" "I,r,J")))]
+  ""
+  "@
+  add\\t%w0, %w1, %2
+  add\\t%w0, %w1, %w2
+  sub\\t%w0, %w1, #%n2"
+  [(set_attr "v8type" "alu")
+   (set_attr "mode" "SI")]
+)
+
+(define_insn "*adddi3_aarch64"
+  [(set
+    (match_operand:DI 0 "register_operand" "=rk,rk,rk,!w")
+    (plus:DI
+     (match_operand:DI 1 "register_operand" "%rk,rk,rk,!w")
+     (match_operand:DI 2 "aarch64_plus_operand" "I,r,J,!w")))]
+  ""
+  "@
+  add\\t%x0, %x1, %2
+  add\\t%x0, %x1, %x2
+  sub\\t%x0, %x1, #%n2
+  add\\t%d0, %d1, %d2"
+  [(set_attr "v8type" "alu")
+   (set_attr "mode" "DI")
+   (set_attr "simd" "*,*,*,yes")]
+)
+
+(define_insn "*add<mode>3_compare0"
+  [(set (reg:CC_NZ CC_REGNUM)
+       (compare:CC_NZ
+        (plus:GPI (match_operand:GPI 1 "register_operand" "%r,r")
+                  (match_operand:GPI 2 "aarch64_plus_operand" "rI,J"))
+        (const_int 0)))
+   (set (match_operand:GPI 0 "register_operand" "=r,r")
+       (plus:GPI (match_dup 1) (match_dup 2)))]
+  ""
+  "@
+  adds\\t%<w>0, %<w>1, %<w>2
+  subs\\t%<w>0, %<w>1, #%n2"
+  [(set_attr "v8type" "alus")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*add<mode>3nr_compare0"
+  [(set (reg:CC_NZ CC_REGNUM)
+       (compare:CC_NZ
+        (plus:GPI (match_operand:GPI 0 "register_operand" "%r,r")
+                  (match_operand:GPI 1 "aarch64_plus_operand" "rI,J"))
+        (const_int 0)))]
+  ""
+  "@
+  cmn\\t%<w>0, %<w>1
+  cmp\\t%<w>0, #%n1"
+  [(set_attr "v8type" "alus")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*add_<shift>_<mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=rk")
+       (plus:GPI (ASHIFT:GPI (match_operand:GPI 1 "register_operand" "r")
+                             (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))
+                 (match_operand:GPI 3 "register_operand" "r")))]
+  ""
+  "add\\t%<w>0, %<w>3, %<w>1, <shift> %2"
+  [(set_attr "v8type" "alu_shift")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*add_mul_imm_<mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=rk")
+       (plus:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+                           (match_operand:QI 2 "aarch64_pwr_2_<mode>" "n"))
+                 (match_operand:GPI 3 "register_operand" "r")))]
+  ""
+  "add\\t%<w>0, %<w>3, %<w>1, lsl %p2"
+  [(set_attr "v8type" "alu_shift")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*add_<optab><ALLX:mode>_<GPI:mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=rk")
+       (plus:GPI (ANY_EXTEND:GPI (match_operand:ALLX 1 "register_operand" "r"))
+                 (match_operand:GPI 2 "register_operand" "r")))]
+  ""
+  "add\\t%<GPI:w>0, %<GPI:w>2, %<GPI:w>1, <su>xt<ALLX:size>"
+  [(set_attr "v8type" "alu_ext")
+   (set_attr "mode" "<GPI:MODE>")]
+)
+
+(define_insn "*add_<optab><ALLX:mode>_shft_<GPI:mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=rk")
+       (plus:GPI (ashift:GPI (ANY_EXTEND:GPI
+                              (match_operand:ALLX 1 "register_operand" "r"))
+                             (match_operand 2 "aarch64_imm3" "Ui3"))
+                 (match_operand:GPI 3 "register_operand" "r")))]
+  ""
+  "add\\t%<GPI:w>0, %<GPI:w>3, %<GPI:w>1, <su>xt<ALLX:size> %2"
+  [(set_attr "v8type" "alu_ext")
+   (set_attr "mode" "<GPI:MODE>")]
+)
+
+(define_insn "*add_<optab><ALLX:mode>_mult_<GPI:mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=rk")
+       (plus:GPI (mult:GPI (ANY_EXTEND:GPI
+                            (match_operand:ALLX 1 "register_operand" "r"))
+                           (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+                 (match_operand:GPI 3 "register_operand" "r")))]
+  ""
+  "add\\t%<GPI:w>0, %<GPI:w>3, %<GPI:w>1, <su>xt<ALLX:size> %p2"
+  [(set_attr "v8type" "alu_ext")
+   (set_attr "mode" "<GPI:MODE>")]
+)
+
+(define_insn "*add_<optab><mode>_multp2"
+  [(set (match_operand:GPI 0 "register_operand" "=rk")
+       (plus:GPI (ANY_EXTRACT:GPI
+                  (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+                            (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+                  (match_operand 3 "const_int_operand" "n")
+                  (const_int 0))
+                 (match_operand:GPI 4 "register_operand" "r")))]
+  "aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])"
+  "add\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2"
+  [(set_attr "v8type" "alu_ext")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*add<mode>3_carryin"
+  [(set
+    (match_operand:GPI 0 "register_operand" "=r")
+    (plus:GPI (geu:GPI (reg:CC CC_REGNUM) (const_int 0))
+             (plus:GPI
+               (match_operand:GPI 1 "register_operand" "r")
+               (match_operand:GPI 2 "register_operand" "r"))))]
+   ""
+   "adc\\t%<w>0, %<w>1, %<w>2"
+  [(set_attr "v8type" "adc")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*add<mode>3_carryin_alt1"
+  [(set
+    (match_operand:GPI 0 "register_operand" "=r")
+    (plus:GPI (plus:GPI
+               (match_operand:GPI 1 "register_operand" "r")
+               (match_operand:GPI 2 "register_operand" "r"))
+              (geu:GPI (reg:CC CC_REGNUM) (const_int 0))))]
+   ""
+   "adc\\t%<w>0, %<w>1, %<w>2"
+  [(set_attr "v8type" "adc")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*add<mode>3_carryin_alt2"
+  [(set
+    (match_operand:GPI 0 "register_operand" "=r")
+    (plus:GPI (plus:GPI
+                (geu:GPI (reg:CC CC_REGNUM) (const_int 0))
+               (match_operand:GPI 1 "register_operand" "r"))
+             (match_operand:GPI 2 "register_operand" "r")))]
+   ""
+   "adc\\t%<w>0, %<w>1, %<w>2"
+  [(set_attr "v8type" "adc")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*add<mode>3_carryin_alt3"
+  [(set
+    (match_operand:GPI 0 "register_operand" "=r")
+    (plus:GPI (plus:GPI
+                (geu:GPI (reg:CC CC_REGNUM) (const_int 0))
+               (match_operand:GPI 2 "register_operand" "r"))
+             (match_operand:GPI 1 "register_operand" "r")))]
+   ""
+   "adc\\t%<w>0, %<w>1, %<w>2"
+  [(set_attr "v8type" "adc")
+   (set_attr "mode" "<MODE>")]
+)
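For illustration (invented example): a double-word addition is expected to be split into an "adds" that sets the carry (the *add<mode>3_compare0 pattern) followed by an "adc" that consumes it (the *add<mode>3_carryin patterns above).

    /* Expected to become roughly "adds x0, x0, x2; adc x1, x1, x3".  */
    __int128 add128 (__int128 a, __int128 b) { return a + b; }
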
+
+(define_insn "*add_uxt<mode>_multp2"
+  [(set (match_operand:GPI 0 "register_operand" "=rk")
+       (plus:GPI (and:GPI
+                  (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+                            (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+                  (match_operand 3 "const_int_operand" "n"))
+                 (match_operand:GPI 4 "register_operand" "r")))]
+  "aarch64_uxt_size (exact_log2 (INTVAL (operands[2])), INTVAL (operands[3])) != 0"
+  "*
+  operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),
+                                          INTVAL (operands[3])));
+  return \"add\t%<w>0, %<w>4, %<w>1, uxt%e3 %p2\";"
+  [(set_attr "v8type" "alu_ext")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "subsi3"
+  [(set (match_operand:SI 0 "register_operand" "=rk")
+       (minus:SI (match_operand:SI 1 "register_operand" "r")
+                  (match_operand:SI 2 "register_operand" "r")))]
+  ""
+  "sub\\t%w0, %w1, %w2"
+  [(set_attr "v8type" "alu")
+   (set_attr "mode" "SI")]
+)
+
+(define_insn "subdi3"
+  [(set (match_operand:DI 0 "register_operand" "=rk,!w")
+       (minus:DI (match_operand:DI 1 "register_operand" "r,!w")
+                  (match_operand:DI 2 "register_operand" "r,!w")))]
+  ""
+  "@
+   sub\\t%x0, %x1, %x2
+   sub\\t%d0, %d1, %d2"
+  [(set_attr "v8type" "alu")
+   (set_attr "mode" "DI")
+   (set_attr "simd" "*,yes")]
+)
+
+
+(define_insn "*sub<mode>3_compare0"
+  [(set (reg:CC_NZ CC_REGNUM)
+       (compare:CC_NZ (minus:GPI (match_operand:GPI 1 "register_operand" "r")
+                                 (match_operand:GPI 2 "register_operand" "r"))
+                      (const_int 0)))
+   (set (match_operand:GPI 0 "register_operand" "=r")
+       (minus:GPI (match_dup 1) (match_dup 2)))]
+  ""
+  "subs\\t%<w>0, %<w>1, %<w>2"
+  [(set_attr "v8type" "alus")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*sub_<shift>_<mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=rk")
+       (minus:GPI (match_operand:GPI 3 "register_operand" "r")
+                  (ASHIFT:GPI
+                   (match_operand:GPI 1 "register_operand" "r")
+                   (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))))]
+  ""
+  "sub\\t%<w>0, %<w>3, %<w>1, <shift> %2"
+  [(set_attr "v8type" "alu_shift")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*sub_mul_imm_<mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=rk")
+       (minus:GPI (match_operand:GPI 3 "register_operand" "r")
+                  (mult:GPI
+                   (match_operand:GPI 1 "register_operand" "r")
+                   (match_operand:QI 2 "aarch64_pwr_2_<mode>" "n"))))]
+  ""
+  "sub\\t%<w>0, %<w>3, %<w>1, lsl %p2"
+  [(set_attr "v8type" "alu_shift")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*sub_<optab><ALLX:mode>_<GPI:mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=rk")
+       (minus:GPI (match_operand:GPI 1 "register_operand" "r")
+                  (ANY_EXTEND:GPI
+                   (match_operand:ALLX 2 "register_operand" "r"))))]
+  ""
+  "sub\\t%<GPI:w>0, %<GPI:w>1, %<GPI:w>2, <su>xt<ALLX:size>"
+  [(set_attr "v8type" "alu_ext")
+   (set_attr "mode" "<GPI:MODE>")]
+)
+
+(define_insn "*sub_<optab><ALLX:mode>_shft_<GPI:mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=rk")
+       (minus:GPI (match_operand:GPI 1 "register_operand" "r")
+                  (ashift:GPI (ANY_EXTEND:GPI
+                               (match_operand:ALLX 2 "register_operand" "r"))
+                              (match_operand 3 "aarch64_imm3" "Ui3"))))]
+  ""
+  "sub\\t%<GPI:w>0, %<GPI:w>1, %<GPI:w>2, <su>xt<ALLX:size> %3"
+  [(set_attr "v8type" "alu_ext")
+   (set_attr "mode" "<GPI:MODE>")]
+)
+
+(define_insn "*sub_<optab><mode>_multp2"
+  [(set (match_operand:GPI 0 "register_operand" "=rk")
+       (minus:GPI (match_operand:GPI 4 "register_operand" "r")
+                  (ANY_EXTRACT:GPI
+                   (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+                             (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+                   (match_operand 3 "const_int_operand" "n")
+                   (const_int 0))))]
+  "aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])"
+  "sub\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2"
+  [(set_attr "v8type" "alu_ext")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*sub_uxt<mode>_multp2"
+  [(set (match_operand:GPI 0 "register_operand" "=rk")
+       (minus:GPI (match_operand:GPI 4 "register_operand" "r")
+                  (and:GPI
+                   (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+                             (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+                   (match_operand 3 "const_int_operand" "n"))))]
+  "aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),INTVAL (operands[3])) != 0"
+  "*
+  operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),
+                                          INTVAL (operands[3])));
+  return \"sub\t%<w>0, %<w>4, %<w>1, uxt%e3 %p2\";"
+  [(set_attr "v8type" "alu_ext")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "neg<mode>2"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (neg:GPI (match_operand:GPI 1 "register_operand" "r")))]
+  ""
+  "neg\\t%<w>0, %<w>1"
+  [(set_attr "v8type" "alu")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*neg<mode>2_compare0"
+  [(set (reg:CC_NZ CC_REGNUM)
+       (compare:CC_NZ (neg:GPI (match_operand:GPI 1 "register_operand" "r"))
+                      (const_int 0)))
+   (set (match_operand:GPI 0 "register_operand" "=r")
+       (neg:GPI (match_dup 1)))]
+  ""
+  "negs\\t%<w>0, %<w>1"
+  [(set_attr "v8type" "alus")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*neg_<shift>_<mode>2"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (neg:GPI (ASHIFT:GPI
+                 (match_operand:GPI 1 "register_operand" "r")
+                 (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))))]
+  ""
+  "neg\\t%<w>0, %<w>1, <shift> %2"
+  [(set_attr "v8type" "alu_shift")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*neg_mul_imm_<mode>2"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (neg:GPI (mult:GPI
+                 (match_operand:GPI 1 "register_operand" "r")
+                 (match_operand:QI 2 "aarch64_pwr_2_<mode>" "n"))))]
+  ""
+  "neg\\t%<w>0, %<w>1, lsl %p2"
+  [(set_attr "v8type" "alu_shift")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "mul<mode>3"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+                 (match_operand:GPI 2 "register_operand" "r")))]
+  ""
+  "mul\\t%<w>0, %<w>1, %<w>2"
+  [(set_attr "v8type" "mult")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*madd<mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (plus:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+                           (match_operand:GPI 2 "register_operand" "r"))
+                 (match_operand:GPI 3 "register_operand" "r")))]
+  ""
+  "madd\\t%<w>0, %<w>1, %<w>2, %<w>3"
+  [(set_attr "v8type" "madd")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*msub<mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (minus:GPI (match_operand:GPI 3 "register_operand" "r")
+                  (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+                            (match_operand:GPI 2 "register_operand" "r"))))]
+
+  ""
+  "msub\\t%<w>0, %<w>1, %<w>2, %<w>3"
+  [(set_attr "v8type" "madd")
+   (set_attr "mode" "<MODE>")]
+)
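For illustration (invented examples): multiply-accumulate expressions are expected to match *madd<mode> and *msub<mode>, giving single "madd"/"msub" instructions instead of a mul followed by a separate add or sub.

    /* Expected to become "madd" and "msub" respectively.  */
    long mla (long a, long b, long c) { return a * b + c; }
    long mls (long a, long b, long c) { return c - a * b; }
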
+
+(define_insn "*mul<mode>_neg"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (mult:GPI (neg:GPI (match_operand:GPI 1 "register_operand" "r"))
+                 (match_operand:GPI 2 "register_operand" "r")))]
+
+  ""
+  "mneg\\t%<w>0, %<w>1, %<w>2"
+  [(set_attr "v8type" "mult")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "<su_optab>mulsidi3"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+       (mult:DI (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r"))
+                (ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r"))))]
+  ""
+  "<su>mull\\t%0, %w1, %w2"
+  [(set_attr "v8type" "mull")
+   (set_attr "mode" "DI")]
+)
+
+(define_insn "<su_optab>maddsidi4"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+       (plus:DI (mult:DI
+                 (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r"))
+                 (ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r")))
+                (match_operand:DI 3 "register_operand" "r")))]
+  ""
+  "<su>maddl\\t%0, %w1, %w2, %3"
+  [(set_attr "v8type" "maddl")
+   (set_attr "mode" "DI")]
+)
+
+(define_insn "<su_optab>msubsidi4"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+       (minus:DI
+        (match_operand:DI 3 "register_operand" "r")
+        (mult:DI (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r"))
+                 (ANY_EXTEND:DI
+                  (match_operand:SI 2 "register_operand" "r")))))]
+  ""
+  "<su>msubl\\t%0, %w1, %w2, %3"
+  [(set_attr "v8type" "maddl")
+   (set_attr "mode" "DI")]
+)
+
+(define_insn "*<su_optab>mulsidi_neg"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+       (mult:DI (neg:DI
+                 (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r")))
+                 (ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r"))))]
+  ""
+  "<su>mnegl\\t%0, %w1, %w2"
+  [(set_attr "v8type" "mull")
+   (set_attr "mode" "DI")]
+)
+
+(define_insn "<su>muldi3_highpart"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+       (truncate:DI
+        (lshiftrt:TI
+         (mult:TI
+          (ANY_EXTEND:TI (match_operand:DI 1 "register_operand" "r"))
+          (ANY_EXTEND:TI (match_operand:DI 2 "register_operand" "r")))
+         (const_int 64))))]
+  ""
+  "<su>mulh\\t%0, %1, %2"
+  [(set_attr "v8type" "mulh")
+   (set_attr "mode" "DI")]
+)
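For illustration (invented example): the high half of a 64x64->128-bit multiply is expected to come from a single "umulh" (or "smulh" in the signed case) via the <su>muldi3_highpart pattern above, with __int128 supplying the widening type.

    /* Expected to become a single "umulh x0, x0, x1".  */
    unsigned long umul_high (unsigned long a, unsigned long b)
    {
      return (unsigned long) (((unsigned __int128) a * b) >> 64);
    }
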
+
+(define_insn "<su_optab>div<mode>3"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (ANY_DIV:GPI (match_operand:GPI 1 "register_operand" "r")
+                    (match_operand:GPI 2 "register_operand" "r")))]
+  ""
+  "<su>div\\t%<w>0, %<w>1, %<w>2"
+  [(set_attr "v8type" "<su>div")
+   (set_attr "mode" "<MODE>")]
+)
+
+;; -------------------------------------------------------------------
+;; Comparison insns
+;; -------------------------------------------------------------------
+
+(define_insn "*cmp<mode>"
+  [(set (reg:CC CC_REGNUM)
+       (compare:CC (match_operand:GPI 0 "register_operand" "r,r")
+                   (match_operand:GPI 1 "aarch64_plus_operand" "rI,J")))]
+  ""
+  "@
+   cmp\\t%<w>0, %<w>1
+   cmn\\t%<w>0, #%n1"
+  [(set_attr "v8type" "alus")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*cmp<mode>"
+  [(set (reg:CCFP CC_REGNUM)
+        (compare:CCFP (match_operand:GPF 0 "register_operand" "w,w")
+                     (match_operand:GPF 1 "aarch64_fp_compare_operand" "Y,w")))]
+   "TARGET_FLOAT"
+   "@
+    fcmp\\t%<s>0, #0.0
+    fcmp\\t%<s>0, %<s>1"
+  [(set_attr "v8type" "fcmp")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*cmpe<mode>"
+  [(set (reg:CCFPE CC_REGNUM)
+        (compare:CCFPE (match_operand:GPF 0 "register_operand" "w,w")
+                      (match_operand:GPF 1 "aarch64_fp_compare_operand" "Y,w")))]
+   "TARGET_FLOAT"
+   "@
+    fcmpe\\t%<s>0, #0.0
+    fcmpe\\t%<s>0, %<s>1"
+  [(set_attr "v8type" "fcmp")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*cmp_swp_<shift>_reg<mode>"
+  [(set (reg:CC_SWP CC_REGNUM)
+       (compare:CC_SWP (ASHIFT:GPI
+                        (match_operand:GPI 0 "register_operand" "r")
+                        (match_operand:QI 1 "aarch64_shift_imm_<mode>" "n"))
+                       (match_operand:GPI 2 "aarch64_reg_or_zero" "rZ")))]
+  ""
+  "cmp\\t%<w>2, %<w>0, <shift> %1"
+  [(set_attr "v8type" "alus_shift")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*cmp_swp_<optab><ALLX:mode>_reg<GPI:mode>"
+  [(set (reg:CC_SWP CC_REGNUM)
+       (compare:CC_SWP (ANY_EXTEND:GPI
+                        (match_operand:ALLX 0 "register_operand" "r"))
+                       (match_operand:GPI 1 "register_operand" "r")))]
+  ""
+  "cmp\\t%<GPI:w>1, %<GPI:w>0, <su>xt<ALLX:size>"
+  [(set_attr "v8type" "alus_ext")
+   (set_attr "mode" "<GPI:MODE>")]
+)
+
+
+;; -------------------------------------------------------------------
+;; Store-flag and conditional select insns
+;; -------------------------------------------------------------------
+
+(define_expand "cstore<mode>4"
+  [(set (match_operand:SI 0 "register_operand" "")
+       (match_operator:SI 1 "aarch64_comparison_operator"
+        [(match_operand:GPI 2 "register_operand" "")
+         (match_operand:GPI 3 "aarch64_plus_operand" "")]))]
+  ""
+  "
+  operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2],
+                                     operands[3]);
+  operands[3] = const0_rtx;
+  "
+)
+
+(define_expand "cstore<mode>4"
+  [(set (match_operand:SI 0 "register_operand" "")
+       (match_operator:SI 1 "aarch64_comparison_operator"
+        [(match_operand:GPF 2 "register_operand" "")
+         (match_operand:GPF 3 "register_operand" "")]))]
+  ""
+  "
+  operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2],
+                                     operands[3]);
+  operands[3] = const0_rtx;
+  "
+)
+
+(define_insn "*cstore<mode>_insn"
+  [(set (match_operand:ALLI 0 "register_operand" "=r")
+       (match_operator:ALLI 1 "aarch64_comparison_operator"
+        [(match_operand 2 "cc_register" "") (const_int 0)]))]
+  ""
+  "cset\\t%<w>0, %m1"
+  [(set_attr "v8type" "csel")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*cstore<mode>_neg"
+  [(set (match_operand:ALLI 0 "register_operand" "=r")
+       (neg:ALLI (match_operator:ALLI 1 "aarch64_comparison_operator"
+                 [(match_operand 2 "cc_register" "") (const_int 0)])))]
+  ""
+  "csetm\\t%<w>0, %m1"
+  [(set_attr "v8type" "csel")
+   (set_attr "mode" "<MODE>")]
+)
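For illustration (invented examples): a comparison result used as a value is expected to become a compare plus "cset" (*cstore<mode>_insn); negating it into an all-ones mask instead uses "csetm" (*cstore<mode>_neg).

    /* Expected to use "cmp" followed by "cset" / "csetm".  */
    int less_flag (int a, int b) { return a < b; }
    int less_mask (int a, int b) { return -(a < b); }
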
+
+(define_expand "cmov<mode>6"
+  [(set (match_operand:GPI 0 "register_operand" "")
+       (if_then_else:GPI
+        (match_operator 1 "aarch64_comparison_operator"
+         [(match_operand:GPI 2 "register_operand" "")
+          (match_operand:GPI 3 "aarch64_plus_operand" "")])
+        (match_operand:GPI 4 "register_operand" "")
+        (match_operand:GPI 5 "register_operand" "")))]
+  ""
+  "
+  operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2],
+                                     operands[3]);
+  operands[3] = const0_rtx;
+  "
+)
+
+(define_expand "cmov<mode>6"
+  [(set (match_operand:GPF 0 "register_operand" "")
+       (if_then_else:GPF
+        (match_operator 1 "aarch64_comparison_operator"
+         [(match_operand:GPF 2 "register_operand" "")
+          (match_operand:GPF 3 "register_operand" "")])
+        (match_operand:GPF 4 "register_operand" "")
+        (match_operand:GPF 5 "register_operand" "")))]
+  ""
+  "
+  operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2],
+                                     operands[3]);
+  operands[3] = const0_rtx;
+  "
+)
+
+(define_insn "*cmov<mode>_insn"
+  [(set (match_operand:ALLI 0 "register_operand" "=r,r,r,r")
+       (if_then_else:ALLI
+        (match_operator 1 "aarch64_comparison_operator"
+         [(match_operand 2 "cc_register" "") (const_int 0)])
+        (match_operand:ALLI 3 "aarch64_reg_zero_or_m1" "rZ,rZ,UsM,UsM")
+        (match_operand:ALLI 4 "aarch64_reg_zero_or_m1" "rZ,UsM,rZ,UsM")))]
+  ""
+  ;; Final alternative should be unreachable, but included for completeness
+  "@
+   csel\\t%<w>0, %<w>3, %<w>4, %m1
+   csinv\\t%<w>0, %<w>3, <w>zr, %m1
+   csinv\\t%<w>0, %<w>4, <w>zr, %M1
+   mov\\t%<w>0, -1"
+  [(set_attr "v8type" "csel")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*cmov<mode>_insn"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+       (if_then_else:GPF
+        (match_operator 1 "aarch64_comparison_operator"
+         [(match_operand 2 "cc_register" "") (const_int 0)])
+        (match_operand:GPF 3 "register_operand" "w")
+        (match_operand:GPF 4 "register_operand" "w")))]
+  "TARGET_FLOAT"
+  "fcsel\\t%<s>0, %<s>3, %<s>4, %m1"
+  [(set_attr "v8type" "fcsel")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_expand "mov<mode>cc"
+  [(set (match_operand:ALLI 0 "register_operand" "")
+       (if_then_else:ALLI (match_operand 1 "aarch64_comparison_operator" "")
+                          (match_operand:ALLI 2 "register_operand" "")
+                          (match_operand:ALLI 3 "register_operand" "")))]
+  ""
+  {
+    rtx ccreg;
+    enum rtx_code code = GET_CODE (operands[1]);
+
+    if (code == UNEQ || code == LTGT)
+      FAIL;
+
+    ccreg = aarch64_gen_compare_reg (code, XEXP (operands[1], 0),
+                                 XEXP (operands[1], 1));
+    operands[1] = gen_rtx_fmt_ee (code, VOIDmode, ccreg, const0_rtx);
+  }
+)
+
+(define_expand "mov<GPF:mode><GPI:mode>cc"
+  [(set (match_operand:GPI 0 "register_operand" "")
+       (if_then_else:GPI (match_operand 1 "aarch64_comparison_operator" "")
+                         (match_operand:GPF 2 "register_operand" "")
+                         (match_operand:GPF 3 "register_operand" "")))]
+  ""
+  {
+    rtx ccreg;
+    enum rtx_code code = GET_CODE (operands[1]);
+
+    if (code == UNEQ || code == LTGT)
+      FAIL;
+
+    ccreg = aarch64_gen_compare_reg (code, XEXP (operands[1], 0),
+                                 XEXP (operands[1], 1));
+    operands[1] = gen_rtx_fmt_ee (code, VOIDmode, ccreg, const0_rtx);
+  }
+)
+
+(define_insn "*csinc2<mode>_insn"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+        (plus:GPI (match_operator:GPI 2 "aarch64_comparison_operator"
+                 [(match_operand:CC 3 "cc_register" "") (const_int 0)])
+                (match_operand:GPI 1 "register_operand" "r")))]
+  ""
+  "csinc\\t%<w>0, %<w>1, %<w>1, %M2"
+  [(set_attr "v8type" "csel")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "csinc3<mode>_insn"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+        (if_then_else:GPI
+         (match_operator:GPI 1 "aarch64_comparison_operator"
+          [(match_operand:CC 2 "cc_register" "") (const_int 0)])
+         (plus:GPI (match_operand:GPI 3 "register_operand" "r")
+                   (const_int 1))
+         (match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))]
+  ""
+  "csinc\\t%<w>0, %<w>4, %<w>3, %M1"
+  [(set_attr "v8type" "csel")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*csinv3<mode>_insn"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+        (if_then_else:GPI
+         (match_operator:GPI 1 "aarch64_comparison_operator"
+          [(match_operand:CC 2 "cc_register" "") (const_int 0)])
+         (not:GPI (match_operand:GPI 3 "register_operand" "r"))
+         (match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))]
+  ""
+  "csinv\\t%<w>0, %<w>4, %<w>3, %M1"
+  [(set_attr "v8type" "csel")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "*csneg3<mode>_insn"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+        (if_then_else:GPI
+         (match_operator:GPI 1 "aarch64_comparison_operator"
+          [(match_operand:CC 2 "cc_register" "") (const_int 0)])
+         (neg:GPI (match_operand:GPI 3 "register_operand" "r"))
+         (match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))]
+  ""
+  "csneg\\t%<w>0, %<w>4, %<w>3, %M1"
+  [(set_attr "v8type" "csel")
+   (set_attr "mode" "<MODE>")])
+
+;; -------------------------------------------------------------------
+;; Logical operations
+;; -------------------------------------------------------------------
+
+(define_insn "<optab><mode>3"
+  [(set (match_operand:GPI 0 "register_operand" "=r,rk")
+       (LOGICAL:GPI (match_operand:GPI 1 "register_operand" "%r,r")
+                    (match_operand:GPI 2 "aarch64_logical_operand" "r,<lconst>")))]
+  ""
+  "<logical>\\t%<w>0, %<w>1, %<w>2"
+  [(set_attr "v8type" "logic,logic_imm")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "*<LOGICAL:optab>_<SHIFT:optab><mode>3"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (LOGICAL:GPI (SHIFT:GPI
+                     (match_operand:GPI 1 "register_operand" "r")
+                     (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))
+                    (match_operand:GPI 3 "register_operand" "r")))]
+  ""
+  "<LOGICAL:logical>\\t%<w>0, %<w>3, %<w>1, <SHIFT:shift> %2"
+  [(set_attr "v8type" "logic_shift")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "one_cmpl<mode>2"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (not:GPI (match_operand:GPI 1 "register_operand" "r")))]
+  ""
+  "mvn\\t%<w>0, %<w>1"
+  [(set_attr "v8type" "logic")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "*one_cmpl_<optab><mode>2"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (not:GPI (SHIFT:GPI (match_operand:GPI 1 "register_operand" "r")
+                           (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))))]
+  ""
+  "mvn\\t%<w>0, %<w>1, <shift> %2"
+  [(set_attr "v8type" "logic_shift")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "*<LOGICAL:optab>_one_cmpl<mode>3"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (LOGICAL:GPI (not:GPI
+                     (match_operand:GPI 1 "register_operand" "r"))
+                    (match_operand:GPI 2 "register_operand" "r")))]
+  ""
+  "<LOGICAL:nlogical>\\t%<w>0, %<w>2, %<w>1"
+  [(set_attr "v8type" "logic")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "*<LOGICAL:optab>_one_cmpl_<SHIFT:optab><mode>3"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (LOGICAL:GPI (not:GPI
+                     (SHIFT:GPI
+                      (match_operand:GPI 1 "register_operand" "r")
+                      (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n")))
+                    (match_operand:GPI 3 "register_operand" "r")))]
+  ""
+  "<LOGICAL:nlogical>\\t%<w>0, %<w>3, %<w>1, <SHIFT:shift> %2"
+  [(set_attr "v8type" "logic_shift")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "clz<mode>2"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (clz:GPI (match_operand:GPI 1 "register_operand" "r")))]
+  ""
+  "clz\\t%<w>0, %<w>1"
+  [(set_attr "v8type" "clz")
+   (set_attr "mode" "<MODE>")])
+
+(define_expand "ffs<mode>2"
+  [(match_operand:GPI 0 "register_operand")
+   (match_operand:GPI 1 "register_operand")]
+  ""
+  {
+    rtx ccreg = aarch64_gen_compare_reg (EQ, operands[1], const0_rtx);
+    rtx x = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
+
+    emit_insn (gen_rbit<mode>2 (operands[0], operands[1]));
+    emit_insn (gen_clz<mode>2 (operands[0], operands[0]));
+    emit_insn (gen_csinc3<mode>_insn (operands[0], x, ccreg, operands[0], const0_rtx));
+    DONE;
+  }
+)
+
+(define_insn "clrsb<mode>2"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (unspec:GPI [(match_operand:GPI 1 "register_operand" "r")] UNSPEC_CLS))]
+  ""
+  "cls\\t%<w>0, %<w>1"
+  [(set_attr "v8type" "clz")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "rbit<mode>2"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (unspec:GPI [(match_operand:GPI 1 "register_operand" "r")] UNSPEC_RBIT))]
+  ""
+  "rbit\\t%<w>0, %<w>1"
+  [(set_attr "v8type" "rbit")
+   (set_attr "mode" "<MODE>")])
+
+(define_expand "ctz<mode>2"
+  [(match_operand:GPI 0 "register_operand")
+   (match_operand:GPI 1 "register_operand")]
+  ""
+  {
+    emit_insn (gen_rbit<mode>2 (operands[0], operands[1]));
+    emit_insn (gen_clz<mode>2 (operands[0], operands[0]));
+    DONE;
+  }
+)
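For illustration (invented example): AArch64 has no count-trailing-zeros instruction, so the ctz<mode>2 expander above synthesises it as a bit reverse followed by a count of leading zeros.

    /* Expected to become "rbit w0, w0; clz w0, w0".  */
    int trailing_zeros (unsigned int x) { return __builtin_ctz (x); }
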
+
+(define_insn "*and<mode>3nr_compare0"
+  [(set (reg:CC CC_REGNUM)
+       (compare:CC
+        (and:GPI (match_operand:GPI 0 "register_operand" "%r,r")
+                 (match_operand:GPI 1 "aarch64_logical_operand" "r,<lconst>"))
+        (const_int 0)))]
+  ""
+  "tst\\t%<w>0, %<w>1"
+  [(set_attr "v8type" "logics")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "*and_<SHIFT:optab><mode>3nr_compare0"
+  [(set (reg:CC CC_REGNUM)
+       (compare:CC
+        (and:GPI (SHIFT:GPI
+                  (match_operand:GPI 0 "register_operand" "r")
+                  (match_operand:QI 1 "aarch64_shift_imm_<mode>" "n"))
+                 (match_operand:GPI 2 "register_operand" "r"))
+       (const_int 0)))]
+  ""
+  "tst\\t%<w>2, %<w>0, <SHIFT:shift> %1"
+  [(set_attr "v8type" "logics_shift")
+   (set_attr "mode" "<MODE>")])
+
+;; -------------------------------------------------------------------
+;; Shifts
+;; -------------------------------------------------------------------
+
+(define_expand "<optab><mode>3"
+  [(set (match_operand:GPI 0 "register_operand")
+       (ASHIFT:GPI (match_operand:GPI 1 "register_operand")
+                   (match_operand:QI 2 "nonmemory_operand")))]
+  ""
+  {
+    if (CONST_INT_P (operands[2]))
+      {
+        operands[2] = GEN_INT (INTVAL (operands[2])
+                               & (GET_MODE_BITSIZE (<MODE>mode) - 1));
+
+        if (operands[2] == const0_rtx)
+          {
+           emit_insn (gen_mov<mode> (operands[0], operands[1]));
+           DONE;
+          }
+      }
+  }
+)
+
+(define_expand "ashl<mode>3"
+  [(set (match_operand:SHORT 0 "register_operand")
+       (ashift:SHORT (match_operand:SHORT 1 "register_operand")
+                     (match_operand:QI 2 "nonmemory_operand")))]
+  ""
+  {
+    if (CONST_INT_P (operands[2]))
+      {
+        operands[2] = GEN_INT (INTVAL (operands[2])
+                               & (GET_MODE_BITSIZE (<MODE>mode) - 1));
+
+        if (operands[2] == const0_rtx)
+          {
+           emit_insn (gen_mov<mode> (operands[0], operands[1]));
+           DONE;
+          }
+      }
+  }
+)
+
+(define_expand "rotr<mode>3"
+  [(set (match_operand:GPI 0 "register_operand")
+       (rotatert:GPI (match_operand:GPI 1 "register_operand")
+                     (match_operand:QI 2 "nonmemory_operand")))]
+  ""
+  {
+    if (CONST_INT_P (operands[2]))
+      {
+        operands[2] = GEN_INT (INTVAL (operands[2])
+                               & (GET_MODE_BITSIZE (<MODE>mode) - 1));
+
+        if (operands[2] == const0_rtx)
+          {
+           emit_insn (gen_mov<mode> (operands[0], operands[1]));
+           DONE;
+          }
+      }
+  }
+)
+
+(define_expand "rotl<mode>3"
+  [(set (match_operand:GPI 0 "register_operand")
+       (rotatert:GPI (match_operand:GPI 1 "register_operand")
+                     (match_operand:QI 2 "nonmemory_operand")))]
+  ""
+  {
+    /* (SZ - cnt) % SZ == -cnt % SZ */
+    if (CONST_INT_P (operands[2]))
+      {
+        operands[2] = GEN_INT ((-INTVAL (operands[2]))
+                              & (GET_MODE_BITSIZE (<MODE>mode) - 1));
+        if (operands[2] == const0_rtx)
+          {
+           emit_insn (gen_mov<mode> (operands[0], operands[1]));
+           DONE;
+          }
+      }
+    else
+      operands[2] = expand_simple_unop (QImode, NEG, operands[2],
+                                       NULL_RTX, 1);
+  }
+)
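For illustration (invented example): AArch64 only provides a rotate-right ("ror"), so the rotl<mode>3 expander above rewrites a left rotate by c as a right rotate by (-c) mod 32, using the identity noted in its comment.

    /* Expected to become "ror w0, w0, 29".  */
    unsigned int rotl3 (unsigned int x) { return (x << 3) | (x >> 29); }
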
+
+(define_insn "*<optab><mode>3_insn"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (SHIFT:GPI
+        (match_operand:GPI 1 "register_operand" "r")
+        (match_operand:QI 2 "aarch64_reg_or_shift_imm_<mode>" "rUs<cmode>")))]
+  ""
+  "<shift>\\t%<w>0, %<w>1, %<w>2"
+  [(set_attr "v8type" "shift")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*ashl<mode>3_insn"
+  [(set (match_operand:SHORT 0 "register_operand" "=r")
+       (ashift:SHORT (match_operand:SHORT 1 "register_operand" "r")
+                     (match_operand:QI 2 "aarch64_reg_or_shift_imm_si" "rUss")))]
+  ""
+  "lsl\\t%<w>0, %<w>1, %<w>2"
+  [(set_attr "v8type" "shift")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*<optab><mode>3_insn"
+  [(set (match_operand:SHORT 0 "register_operand" "=r")
+       (ASHIFT:SHORT (match_operand:SHORT 1 "register_operand" "r")
+                     (match_operand 2 "const_int_operand" "n")))]
+  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<MODE>mode)"
+{
+  operands[3] = GEN_INT (<sizen> - UINTVAL (operands[2]));
+  return "<bfshift>\t%w0, %w1, %2, %3";
+}
+  [(set_attr "v8type" "bfm")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*<ANY_EXTEND:optab><GPI:mode>_ashl<SHORT:mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (ANY_EXTEND:GPI
+        (ashift:SHORT (match_operand:SHORT 1 "register_operand" "r")
+                      (match_operand 2 "const_int_operand" "n"))))]
+  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
+{
+  operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
+  return "<su>bfiz\t%<GPI:w>0, %<GPI:w>1, %2, %3";
+}
+  [(set_attr "v8type" "bfm")
+   (set_attr "mode" "<GPI:MODE>")]
+)
+
+(define_insn "*zero_extend<GPI:mode>_lshr<SHORT:mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (zero_extend:GPI
+        (lshiftrt:SHORT (match_operand:SHORT 1 "register_operand" "r")
+                        (match_operand 2 "const_int_operand" "n"))))]
+  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
+{
+  operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
+  return "ubfx\t%<GPI:w>0, %<GPI:w>1, %2, %3";
+}
+  [(set_attr "v8type" "bfm")
+   (set_attr "mode" "<GPI:MODE>")]
+)
+
+(define_insn "*extend<GPI:mode>_ashr<SHORT:mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (sign_extend:GPI
+        (ashiftrt:SHORT (match_operand:SHORT 1 "register_operand" "r")
+                        (match_operand 2 "const_int_operand" "n"))))]
+  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
+{
+  operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
+  return "sbfx\\t%<GPI:w>0, %<GPI:w>1, %2, %3";
+}
+  [(set_attr "v8type" "bfm")
+   (set_attr "mode" "<GPI:MODE>")]
+)
+
+;; -------------------------------------------------------------------
+;; Bitfields
+;; -------------------------------------------------------------------
+
+(define_expand "<optab>"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+       (ANY_EXTRACT:DI (match_operand:DI 1 "register_operand" "r")
+                       (match_operand 2 "const_int_operand" "n")
+                       (match_operand 3 "const_int_operand" "n")))]
+  ""
+  ""
+)
+
+(define_insn "*<optab><mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (ANY_EXTRACT:GPI (match_operand:GPI 1 "register_operand" "r")
+                        (match_operand 2 "const_int_operand" "n")
+                        (match_operand 3 "const_int_operand" "n")))]
+  ""
+  "<su>bfx\\t%<w>0, %<w>1, %3, %2"
+  [(set_attr "v8type" "bfm")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*<optab><ALLX:mode>_shft_<GPI:mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (ashift:GPI (ANY_EXTEND:GPI
+                    (match_operand:ALLX 1 "register_operand" "r"))
+                   (match_operand 2 "const_int_operand" "n")))]
+  "UINTVAL (operands[2]) < <GPI:sizen>"
+{
+  operands[3] = (<ALLX:sizen> <= (<GPI:sizen> - UINTVAL (operands[2])))
+             ? GEN_INT (<ALLX:sizen>)
+             : GEN_INT (<GPI:sizen> - UINTVAL (operands[2]));
+  return "<su>bfiz\t%<GPI:w>0, %<GPI:w>1, %2, %3";
+}
+  [(set_attr "v8type" "bfm")
+   (set_attr "mode" "<GPI:MODE>")]
+)
+
+;; XXX We should match (any_extend (ashift)) here, like (and (ashift)) below
+
+(define_insn "*andim_ashift<mode>_bfiz"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+       (and:GPI (ashift:GPI (match_operand:GPI 1 "register_operand" "r")
+                            (match_operand 2 "const_int_operand" "n"))
+                (match_operand 3 "const_int_operand" "n")))]
+  "exact_log2 ((INTVAL (operands[3]) >> INTVAL (operands[2])) + 1) >= 0
+   && (INTVAL (operands[3]) & ((1 << INTVAL (operands[2])) - 1)) == 0"
+  "ubfiz\\t%<w>0, %<w>1, %2, %P3"
+  [(set_attr "v8type" "bfm")
+   (set_attr "mode" "<MODE>")]
+)
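For illustration (invented examples): shift-and-mask idioms are expected to collapse into single bitfield instructions via the patterns above, "ubfx" for a field extract and "ubfiz" for a field insert into zeros.

    /* Expected to become "ubfx" and "ubfiz" respectively.  */
    unsigned int extract_bits (unsigned int x) { return (x >> 5) & 0x3f; }
    unsigned int insert_bits (unsigned int x) { return (x & 0xff) << 4; }
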
+
+(define_insn "bswap<mode>2"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+        (bswap:GPI (match_operand:GPI 1 "register_operand" "r")))]
+  ""
+  "rev\\t%<w>0, %<w>1"
+  [(set_attr "v8type" "rev")
+   (set_attr "mode" "<MODE>")]
+)
+
+;; -------------------------------------------------------------------
+;; Floating-point intrinsics
+;; -------------------------------------------------------------------
+
+;; trunc - nothrow
+
+(define_insn "btrunc<mode>2"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+        (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+        UNSPEC_FRINTZ))]
+  "TARGET_FLOAT"
+  "frintz\\t%<s>0, %<s>1"
+  [(set_attr "v8type" "frint")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*lbtrunc<su_optab><GPF:mode><GPI:mode>2"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+        (FIXUORS:GPI (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+                     UNSPEC_FRINTZ)))]
+  "TARGET_FLOAT"
+  "fcvtz<su>\\t%<GPI:w>0, %<GPF:s>1"
+  [(set_attr "v8type" "fcvtf2i")
+   (set_attr "mode" "<GPF:MODE>")
+   (set_attr "mode2" "<GPI:MODE>")]
+)
+
+;; ceil - nothrow
+
+(define_insn "ceil<mode>2"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+        (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+        UNSPEC_FRINTP))]
+  "TARGET_FLOAT"
+  "frintp\\t%<s>0, %<s>1"
+  [(set_attr "v8type" "frint")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "lceil<su_optab><GPF:mode><GPI:mode>2"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+        (FIXUORS:GPI (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+                     UNSPEC_FRINTP)))]
+  "TARGET_FLOAT"
+  "fcvtp<su>\\t%<GPI:w>0, %<GPF:s>1"
+  [(set_attr "v8type" "fcvtf2i")
+   (set_attr "mode" "<GPF:MODE>")
+   (set_attr "mode2" "<GPI:MODE>")]
+)
+
+;; floor - nothrow
+
+(define_insn "floor<mode>2"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+        (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+        UNSPEC_FRINTM))]
+  "TARGET_FLOAT"
+  "frintm\\t%<s>0, %<s>1"
+  [(set_attr "v8type" "frint")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "lfloor<su_optab><GPF:mode><GPI:mode>2"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+        (FIXUORS:GPI (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+                     UNSPEC_FRINTM)))]
+  "TARGET_FLOAT"
+  "fcvtm<su>\\t%<GPI:w>0, %<GPF:s>1"
+  [(set_attr "v8type" "fcvtf2i")
+   (set_attr "mode" "<GPF:MODE>")
+   (set_attr "mode2" "<GPI:MODE>")]
+)
+
+;; nearbyint - nothrow
+
+(define_insn "nearbyint<mode>2"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+        (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+        UNSPEC_FRINTI))]
+  "TARGET_FLOAT"
+  "frinti\\t%<s>0, %<s>1"
+  [(set_attr "v8type" "frint")
+   (set_attr "mode" "<MODE>")]
+)
+
+;; rint
+
+(define_insn "rint<mode>2"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+        (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+        UNSPEC_FRINTX))]
+  "TARGET_FLOAT"
+  "frintx\\t%<s>0, %<s>1"
+  [(set_attr "v8type" "frint")
+   (set_attr "mode" "<MODE>")]
+)
+
+;; round - nothrow
+
+(define_insn "round<mode>2"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+        (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+        UNSPEC_FRINTA))]
+  "TARGET_FLOAT"
+  "frinta\\t%<s>0, %<s>1"
+  [(set_attr "v8type" "frint")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "lround<su_optab><GPF:mode><GPI:mode>2"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+        (FIXUORS:GPI (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+                     UNSPEC_FRINTA)))]
+  "TARGET_FLOAT"
+  "fcvta<su>\\t%<GPI:w>0, %<GPF:s>1"
+  [(set_attr "v8type" "fcvtf2i")
+   (set_attr "mode" "<GPF:MODE>")
+   (set_attr "mode2" "<GPI:MODE>")]
+)
+
+;; fma - nothrow
+
+(define_insn "fma<mode>4"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+        (fma:GPF (match_operand:GPF 1 "register_operand" "w")
+                (match_operand:GPF 2 "register_operand" "w")
+                (match_operand:GPF 3 "register_operand" "w")))]
+  "TARGET_FLOAT"
+  "fmadd\\t%<s>0, %<s>1, %<s>2, %<s>3"
+  [(set_attr "v8type" "fmadd")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "fnma<mode>4"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+       (fma:GPF (neg:GPF (match_operand:GPF 1 "register_operand" "w"))
+                (match_operand:GPF 2 "register_operand" "w")
+                (match_operand:GPF 3 "register_operand" "w")))]
+  "TARGET_FLOAT"
+  "fmsub\\t%<s>0, %<s>1, %<s>2, %<s>3"
+  [(set_attr "v8type" "fmadd")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "fms<mode>4"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+        (fma:GPF (match_operand:GPF 1 "register_operand" "w")
+                (match_operand:GPF 2 "register_operand" "w")
+                (neg:GPF (match_operand:GPF 3 "register_operand" "w"))))]
+  "TARGET_FLOAT"
+  "fnmsub\\t%<s>0, %<s>1, %<s>2, %<s>3"
+  [(set_attr "v8type" "fmadd")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "fnms<mode>4"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+       (fma:GPF (neg:GPF (match_operand:GPF 1 "register_operand" "w"))
+                (match_operand:GPF 2 "register_operand" "w")
+                (neg:GPF (match_operand:GPF 3 "register_operand" "w"))))]
+  "TARGET_FLOAT"
+  "fnmadd\\t%<s>0, %<s>1, %<s>2, %<s>3"
+  [(set_attr "v8type" "fmadd")
+   (set_attr "mode" "<MODE>")]
+)
+
+;; If signed zeros are ignored, -(a * b + c) = -a * b - c.
+(define_insn "*fnmadd<mode>4"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+       (neg:GPF (fma:GPF (match_operand:GPF 1 "register_operand" "w")
+                         (match_operand:GPF 2 "register_operand" "w")
+                         (match_operand:GPF 3 "register_operand" "w"))))]
+  "!HONOR_SIGNED_ZEROS (<MODE>mode) && TARGET_FLOAT"
+  "fnmadd\\t%<s>0, %<s>1, %<s>2, %<s>3"
+  [(set_attr "v8type" "fmadd")
+   (set_attr "mode" "<MODE>")]
+)
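For illustration (invented example): __builtin_fma maps directly onto the fma<mode>4 pattern and a single "fmadd"; a plain a * b + c may also be contracted to "fmadd" when floating-point contraction is permitted.

    /* Expected to become a single "fmadd d0, d0, d1, d2".  */
    double fused (double a, double b, double c) { return __builtin_fma (a, b, c); }
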
+
+;; -------------------------------------------------------------------
+;; Floating-point conversions
+;; -------------------------------------------------------------------
+
+(define_insn "extendsfdf2"
+  [(set (match_operand:DF 0 "register_operand" "=w")
+        (float_extend:DF (match_operand:SF 1 "register_operand" "w")))]
+  "TARGET_FLOAT"
+  "fcvt\\t%d0, %s1"
+  [(set_attr "v8type" "fcvt")
+   (set_attr "mode" "DF")
+   (set_attr "mode2" "SF")]
+)
+
+(define_insn "truncdfsf2"
+  [(set (match_operand:SF 0 "register_operand" "=w")
+        (float_truncate:SF (match_operand:DF 1 "register_operand" "w")))]
+  "TARGET_FLOAT"
+  "fcvt\\t%s0, %d1"
+  [(set_attr "v8type" "fcvt")
+   (set_attr "mode" "SF")
+   (set_attr "mode2" "DF")]
+)
+
+(define_insn "fix_trunc<GPF:mode><GPI:mode>2"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+        (fix:GPI (match_operand:GPF 1 "register_operand" "w")))]
+  "TARGET_FLOAT"
+  "fcvtzs\\t%<GPI:w>0, %<GPF:s>1"
+  [(set_attr "v8type" "fcvtf2i")
+   (set_attr "mode" "<GPF:MODE>")
+   (set_attr "mode2" "<GPI:MODE>")]
+)
+
+(define_insn "fixuns_trunc<GPF:mode><GPI:mode>2"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+        (unsigned_fix:GPI (match_operand:GPF 1 "register_operand" "w")))]
+  "TARGET_FLOAT"
+  "fcvtzu\\t%<GPI:w>0, %<GPF:s>1"
+  [(set_attr "v8type" "fcvtf2i")
+   (set_attr "mode" "<GPF:MODE>")
+   (set_attr "mode2" "<GPI:MODE>")]
+)
+
+(define_insn "float<GPI:mode><GPF:mode>2"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+        (float:GPF (match_operand:GPI 1 "register_operand" "r")))]
+  "TARGET_FLOAT"
+  "scvtf\\t%<GPF:s>0, %<GPI:w>1"
+  [(set_attr "v8type" "fcvti2f")
+   (set_attr "mode" "<GPF:MODE>")
+   (set_attr "mode2" "<GPI:MODE>")]
+)
+
+(define_insn "floatuns<GPI:mode><GPF:mode>2"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+        (unsigned_float:GPF (match_operand:GPI 1 "register_operand" "r")))]
+  "TARGET_FLOAT"
+  "ucvtf\\t%<GPF:s>0, %<GPI:w>1"
+  [(set_attr "v8type" "fcvt")
+   (set_attr "mode" "<GPF:MODE>")
+   (set_attr "mode2" "<GPI:MODE>")]
+)
+
+;; -------------------------------------------------------------------
+;; Floating-point arithmetic
+;; -------------------------------------------------------------------
+
+(define_insn "add<mode>3"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+        (plus:GPF
+         (match_operand:GPF 1 "register_operand" "w")
+         (match_operand:GPF 2 "register_operand" "w")))]
+  "TARGET_FLOAT"
+  "fadd\\t%<s>0, %<s>1, %<s>2"
+  [(set_attr "v8type" "fadd")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "sub<mode>3"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+        (minus:GPF
+         (match_operand:GPF 1 "register_operand" "w")
+         (match_operand:GPF 2 "register_operand" "w")))]
+  "TARGET_FLOAT"
+  "fsub\\t%<s>0, %<s>1, %<s>2"
+  [(set_attr "v8type" "fadd")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "mul<mode>3"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+        (mult:GPF
+         (match_operand:GPF 1 "register_operand" "w")
+         (match_operand:GPF 2 "register_operand" "w")))]
+  "TARGET_FLOAT"
+  "fmul\\t%<s>0, %<s>1, %<s>2"
+  [(set_attr "v8type" "fmul")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*fnmul<mode>3"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+        (mult:GPF
+                (neg:GPF (match_operand:GPF 1 "register_operand" "w"))
+                (match_operand:GPF 2 "register_operand" "w")))]
+  "TARGET_FLOAT"
+  "fnmul\\t%<s>0, %<s>1, %<s>2"
+  [(set_attr "v8type" "fmul")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "div<mode>3"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+        (div:GPF
+         (match_operand:GPF 1 "register_operand" "w")
+         (match_operand:GPF 2 "register_operand" "w")))]
+  "TARGET_FLOAT"
+  "fdiv\\t%<s>0, %<s>1, %<s>2"
+  [(set_attr "v8type" "fdiv")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "neg<mode>2"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+        (neg:GPF (match_operand:GPF 1 "register_operand" "w")))]
+  "TARGET_FLOAT"
+  "fneg\\t%<s>0, %<s>1"
+  [(set_attr "v8type" "ffarith")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "sqrt<mode>2"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+        (sqrt:GPF (match_operand:GPF 1 "register_operand" "w")))]
+  "TARGET_FLOAT"
+  "fsqrt\\t%<s>0, %<s>1"
+  [(set_attr "v8type" "fsqrt")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "abs<mode>2"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+        (abs:GPF (match_operand:GPF 1 "register_operand" "w")))]
+  "TARGET_FLOAT"
+  "fabs\\t%<s>0, %<s>1"
+  [(set_attr "v8type" "ffarith")
+   (set_attr "mode" "<MODE>")]
+)
+
+;; Given that smax/smin do not specify the result when either input is NaN,
+;; we could use either FMAXNM or FMAX for smax, and either FMINNM or FMIN
+;; for smin.
+
+(define_insn "smax<mode>3"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+        (smax:GPF (match_operand:GPF 1 "register_operand" "w")
+                 (match_operand:GPF 2 "register_operand" "w")))]
+  "TARGET_FLOAT"
+  "fmaxnm\\t%<s>0, %<s>1, %<s>2"
+  [(set_attr "v8type" "fminmax")
+   (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "smin<mode>3"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+        (smin:GPF (match_operand:GPF 1 "register_operand" "w")
+                 (match_operand:GPF 2 "register_operand" "w")))]
+  "TARGET_FLOAT"
+  "fminnm\\t%<s>0, %<s>1, %<s>2"
+  [(set_attr "v8type" "fminmax")
+   (set_attr "mode" "<MODE>")]
+)
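For illustration (invented example, behaviour depends on flags): under -ffinite-math-only the compiler may recognise a floating-point compare-and-select as smax and use the "fmaxnm" instruction from the pattern above.

    /* May become a single "fmaxnm d0, d0, d1" with -ffinite-math-only.  */
    double dmax (double a, double b) { return a > b ? a : b; }
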
+
+;; -------------------------------------------------------------------
+;; Reload support
+;; -------------------------------------------------------------------
+
+;; Reload SP+imm where imm cannot be handled by a single ADD instruction.  
+;; Must load imm into a scratch register and copy SP to the dest reg before
+;; adding, since SP cannot be used as a source register in an ADD
+;; instruction.
+(define_expand "reload_sp_immediate"
+  [(parallel [(set (match_operand:DI 0 "register_operand" "=r")
+                  (match_operand:DI 1 "" ""))
+            (clobber (match_operand:TI 2 "register_operand" "=&r"))])]
+  ""
+  {
+    rtx sp = XEXP (operands[1], 0);
+    rtx val = XEXP (operands[1], 1);
+    unsigned regno = REGNO (operands[2]);
+    rtx scratch = operands[1];
+    gcc_assert (GET_CODE (operands[1]) == PLUS);
+    gcc_assert (sp == stack_pointer_rtx);
+    gcc_assert (CONST_INT_P (val));
+
+    /* It is possible that one of the registers we got for operands[2]
+       might coincide with that of operands[0] (which is why we made
+       it TImode).  Pick the other one to use as our scratch.  */
+    if (regno == REGNO (operands[0]))
+      regno++;
+    scratch = gen_rtx_REG (DImode, regno);
+
+    emit_move_insn (scratch, val);
+    emit_move_insn (operands[0], sp);
+    emit_insn (gen_adddi3 (operands[0], operands[0], scratch));
+    DONE;
+  }
+)
+
+(define_expand "aarch64_reload_mov<mode>"
+  [(set (match_operand:TX 0 "register_operand" "=w")
+        (match_operand:TX 1 "register_operand" "w"))
+   (clobber (match_operand:DI 2 "register_operand" "=&r"))
+  ]
+  ""
+  {
+    rtx op0 = simplify_gen_subreg (TImode, operands[0], <MODE>mode, 0);
+    rtx op1 = simplify_gen_subreg (TImode, operands[1], <MODE>mode, 0);
+    /* The gen_* functions only build the insns; pass them to emit_insn
+       so they are actually added to the instruction stream.  */
+    emit_insn (gen_aarch64_movtilow_tilow (op0, op1));
+    emit_insn (gen_aarch64_movdi_tihigh (operands[2], op1));
+    emit_insn (gen_aarch64_movtihigh_di (op0, operands[2]));
+    DONE;
+  }
+)
+
+;; The following secondary reload helper patterns are only invoked
+;; during or after reload; we don't want these patterns kicking in
+;; earlier, for instance in the combine pass.
+(define_insn "aarch64_movdi_tilow"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (truncate:DI (match_operand:TI 1 "register_operand" "w")))]
+  "reload_completed || reload_in_progress"
+  "fmov\\t%x0, %d1"
+  [(set_attr "v8type" "fmovf2i")
+   (set_attr "mode"   "DI")
+   (set_attr "length" "4")
+  ])
+
+(define_insn "aarch64_movdi_tihigh"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (truncate:DI
+         (lshiftrt:TI (match_operand:TI 1 "register_operand" "w")
+                      (const_int 64))))]
+  "reload_completed || reload_in_progress"
+  "fmov\\t%x0, %1.d[1]"
+  [(set_attr "v8type" "fmovf2i")
+   (set_attr "mode"   "DI")
+   (set_attr "length" "4")
+  ])
+
+(define_insn "aarch64_movtihigh_di"
+  [(set (zero_extract:TI (match_operand:TI 0 "register_operand" "+w")
+                         (const_int 64) (const_int 64))
+        (zero_extend:TI (match_operand:DI 1 "register_operand" "r")))]
+  "reload_completed || reload_in_progress"
+  "fmov\\t%0.d[1], %x1"
+
+  [(set_attr "v8type" "fmovi2f")
+   (set_attr "mode"   "DI")
+   (set_attr "length" "4")
+  ])
+
+(define_insn "aarch64_movtilow_di"
+  [(set (match_operand:TI 0 "register_operand" "=w")
+        (zero_extend:TI (match_operand:DI 1 "register_operand" "r")))]
+  "reload_completed || reload_in_progress"
+  "fmov\\t%d0, %x1"
+
+  [(set_attr "v8type" "fmovi2f")
+   (set_attr "mode"   "DI")
+   (set_attr "length" "4")
+  ])
+
+(define_insn "aarch64_movtilow_tilow"
+  [(set (match_operand:TI 0 "register_operand" "=w")
+        (zero_extend:TI 
+         (truncate:DI (match_operand:TI 1 "register_operand" "w"))))]
+  "reload_completed || reload_in_progress"
+  "fmov\\t%d0, %d1"
+
+  [(set_attr "v8type" "fmovi2f")
+   (set_attr "mode"   "DI")
+   (set_attr "length" "4")
+  ])
+
+;; The operands of the high and lo_sum expressions used for the ADRP and
+;; ADD instructions deliberately have no modes.  This allows high and
+;; lo_sum to be used with the labels that define the jump tables in the
+;; rodata section.
+
+(define_insn "add_losym"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+       (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+                  (match_operand 2 "aarch64_valid_symref" "S")))]
+  ""
+  "add\\t%0, %1, :lo12:%a2"
+  [(set_attr "v8type" "alu")
+   (set_attr "mode" "DI")]
+
+)
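For illustration (invented symbol name): in the small code model, taking the address of a global is expected to expand to an ADRP of the page address plus an ADD of the :lo12: offset via the add_losym pattern above, e.g. "adrp x0, counter" followed by "add x0, x0, :lo12:counter".

    extern int counter;
    int *counter_address (void) { return &counter; }
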
+
+(define_insn "ldr_got_small"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+       (unspec:DI [(mem:DI (lo_sum:DI
+                             (match_operand:DI 1 "register_operand" "r")
+                             (match_operand:DI 2 "aarch64_valid_symref" "S")))]
+                  UNSPEC_GOTSMALLPIC))]
+  ""
+  "ldr\\t%0, [%1, #:got_lo12:%a2]"
+  [(set_attr "v8type" "load1")
+   (set_attr "mode" "DI")]
+)
+
+(define_insn "aarch64_load_tp_hard"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+       (unspec:DI [(const_int 0)] UNSPEC_TLS))]
+  ""
+  "mrs\\t%0, tpidr_el0"
+  [(set_attr "v8type" "mrs")
+   (set_attr "mode" "DI")]
+)
+
+;; The TLS ABI specifically requires that the compiler does not schedule
+;; instructions in the TLS stubs, in order to enable linker relaxation.
+;; Therefore we treat the stubs as an atomic sequence.
+(define_expand "tlsgd_small"
+ [(parallel [(set (match_operand 0 "register_operand" "")
+                  (call (mem:DI (match_dup 2)) (const_int 1)))
+            (unspec:DI [(match_operand:DI 1 "aarch64_valid_symref" "")] UNSPEC_GOTSMALLTLS)
+            (clobber (reg:DI LR_REGNUM))])]
+ ""
+{
+  operands[2] = aarch64_tls_get_addr ();
+})
+
+(define_insn "*tlsgd_small"
+  [(set (match_operand 0 "register_operand" "")
+       (call (mem:DI (match_operand:DI 2 "" "")) (const_int 1)))
+   (unspec:DI [(match_operand:DI 1 "aarch64_valid_symref" "S")] UNSPEC_GOTSMALLTLS)
+   (clobber (reg:DI LR_REGNUM))
+  ]
+  ""
+  "adrp\\tx0, %A1\;add\\tx0, x0, %L1\;bl\\t%2\;nop"
+  [(set_attr "v8type" "call")
+   (set_attr "length" "16")])
+
+(define_insn "tlsie_small"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (unspec:DI [(match_operand:DI 1 "aarch64_tls_ie_symref" "S")]
+                  UNSPEC_GOTSMALLTLS))]
+  ""
+  "adrp\\t%0, %A1\;ldr\\t%0, [%0, #%L1]"
+  [(set_attr "v8type" "load1")
+   (set_attr "mode" "DI")
+   (set_attr "length" "8")]
+)
+
+(define_insn "tlsle_small"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (unspec:DI [(match_operand:DI 1 "register_operand" "r")
+                   (match_operand:DI 2 "aarch64_tls_le_symref" "S")]
+                  UNSPEC_GOTSMALLTLS))]
+  ""
+  "add\\t%0, %1, #%G2\;add\\t%0, %0, #%L2"
+  [(set_attr "v8type" "alu")
+   (set_attr "mode" "DI")
+   (set_attr "length" "8")]
+)
+
+(define_insn "tlsdesc_small"
+  [(set (reg:DI R0_REGNUM)
+        (unspec:DI [(match_operand:DI 0 "aarch64_valid_symref" "S")]
+                  UNSPEC_TLSDESC))
+   (clobber (reg:DI LR_REGNUM))
+   (clobber (match_scratch:DI 1 "=r"))]
+  "TARGET_TLS_DESC"
+  "adrp\\tx0, %A0\;ldr\\t%1, [x0, #%L0]\;add\\tx0, x0, %L0\;.tlsdesccall\\t%0\;blr\\t%1"
+  [(set_attr "v8type" "call")
+   (set_attr "length" "16")])
+
+(define_insn "stack_tie"
+  [(set (mem:BLK (scratch))
+       (unspec:BLK [(match_operand:DI 0 "register_operand" "rk")
+                    (match_operand:DI 1 "register_operand" "rk")]
+                   UNSPEC_PRLG_STK))]
+  ""
+  ""
+  [(set_attr "length" "0")]
+)
+
+;; AdvSIMD Stuff
+(include "aarch64-simd.md")
+
+;; Synchronization Builtins
+(include "sync.md")
diff --git a/gcc/config/aarch64/aarch64.opt b/gcc/config/aarch64/aarch64.opt
new file mode 100644 (file)
index 0000000..49d0125
--- /dev/null
@@ -0,0 +1,100 @@
+; Machine description for AArch64 architecture.
+; Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+; Contributed by ARM Ltd.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it
+; under the terms of the GNU General Public License as published by
+; the Free Software Foundation; either version 3, or (at your option)
+; any later version.
+;
+; GCC is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+; General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3.  If not see
+; <http://www.gnu.org/licenses/>.
+
+HeaderInclude
+config/aarch64/aarch64-opts.h
+
+; The TLS dialect names to use with -mtls-dialect.
+
+Enum
+Name(tls_type) Type(enum aarch64_tls_type)
+The possible TLS dialects:
+
+EnumValue
+Enum(tls_type) String(trad) Value(TLS_TRADITIONAL)
+
+EnumValue
+Enum(tls_type) String(desc) Value(TLS_DESCRIPTORS)
+
+; The code model option names for -mcmodel.
+
+Enum
+Name(cmodel) Type(enum aarch64_code_model)
+The code model option names for -mcmodel:
+
+EnumValue
+Enum(cmodel) String(tiny) Value(AARCH64_CMODEL_TINY)
+
+EnumValue
+Enum(cmodel) String(small) Value(AARCH64_CMODEL_SMALL)
+
+EnumValue
+Enum(cmodel) String(large) Value(AARCH64_CMODEL_LARGE)
+
+; The cpu/arch option names to use in cpu/arch selection.
+
+Variable
+const char *aarch64_arch_string
+
+Variable
+const char *aarch64_cpu_string
+
+Variable
+const char *aarch64_tune_string
+
+mbig-endian
+Target Report RejectNegative Mask(BIG_END)
+Assume target CPU is configured as big endian
+
+mgeneral-regs-only
+Target Report RejectNegative Mask(GENERAL_REGS_ONLY)
+Generate code which uses only the general registers
+
+mlittle-endian
+Target Report RejectNegative InverseMask(BIG_END)
+Assume target CPU is configured as little endian
+
+mcmodel=
+Target RejectNegative Joined Enum(cmodel) Var(aarch64_cmodel_var) Init(AARCH64_CMODEL_SMALL)
+Specify the code model
+
+mstrict-align
+Target Report RejectNegative Mask(STRICT_ALIGN)
+Don't assume that unaligned accesses are handled by the system
+
+momit-leaf-frame-pointer
+Target Report Save Var(flag_omit_leaf_frame_pointer) Init(1)
+Omit the frame pointer in leaf functions
+
+mtls-dialect=
+Target RejectNegative Joined Enum(tls_type) Var(aarch64_tls_dialect) Init(TLS_DESCRIPTORS)
+Specify TLS dialect
+
+march=
+Target RejectNegative Joined Var(aarch64_arch_string)
+-march=ARCH    Use features of architecture ARCH
+
+mcpu=
+Target RejectNegative Joined Var(aarch64_cpu_string)
+-mcpu=CPU      Use features of and optimize for CPU
+
+mtune=
+Target RejectNegative Joined Var(aarch64_tune_string)
+-mtune=CPU     Optimize for CPU
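For orientation only (not part of the patch): the entries above are the user-facing command-line options of the port, so an invocation might combine them along the lines of

  gcc -march=armv8-a -mcmodel=small -mtls-dialect=desc -mstrict-align foo.c

where the architecture name is assumed to come from aarch64-arches.def and is shown purely as an example; the option spellings themselves (-mbig-endian, -mgeneral-regs-only, -momit-leaf-frame-pointer, -mcpu=, -mtune=) are exactly those defined above.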
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
new file mode 100644 (file)
index 0000000..e8fafa6
--- /dev/null
@@ -0,0 +1,25543 @@
+/* ARM NEON intrinsics include file.
+
+   Copyright (C) 2011, 2012 Free Software Foundation, Inc.
+   Contributed by ARM Ltd.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published
+   by the Free Software Foundation; either version 3, or (at your
+   option) any later version.
+
+   GCC is distributed in the hope that it will be useful, but WITHOUT
+   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+   License for more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _AARCH64_NEON_H_
+#define _AARCH64_NEON_H_
+
+#include <stdint.h>
+
+typedef __builtin_aarch64_simd_qi int8x8_t
+  __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_hi int16x4_t
+  __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_si int32x2_t
+  __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_di int64x1_t;
+typedef __builtin_aarch64_simd_si int32x1_t;
+typedef __builtin_aarch64_simd_hi int16x1_t;
+typedef __builtin_aarch64_simd_qi int8x1_t;
+typedef __builtin_aarch64_simd_df float64x1_t;
+typedef __builtin_aarch64_simd_sf float32x2_t
+  __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_poly8 poly8x8_t
+  __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_poly16 poly16x4_t
+  __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_uqi uint8x8_t
+  __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_uhi uint16x4_t
+  __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_usi uint32x2_t
+  __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_udi uint64x1_t;
+typedef __builtin_aarch64_simd_usi uint32x1_t;
+typedef __builtin_aarch64_simd_uhi uint16x1_t;
+typedef __builtin_aarch64_simd_uqi uint8x1_t;
+typedef __builtin_aarch64_simd_qi int8x16_t
+  __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_hi int16x8_t
+  __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_si int32x4_t
+  __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_di int64x2_t
+  __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_sf float32x4_t
+  __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_df float64x2_t
+  __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_poly8 poly8x16_t
+  __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_poly16 poly16x8_t
+  __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_uqi uint8x16_t
+  __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_uhi uint16x8_t
+  __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_usi uint32x4_t
+  __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_udi uint64x2_t
+  __attribute__ ((__vector_size__ (16)));
+
+typedef float float32_t;
+typedef double float64_t;
+typedef __builtin_aarch64_simd_poly8 poly8_t;
+typedef __builtin_aarch64_simd_poly16 poly16_t;
+
+typedef struct int8x8x2_t
+{
+  int8x8_t val[2];
+} int8x8x2_t;
+
+typedef struct int8x16x2_t
+{
+  int8x16_t val[2];
+} int8x16x2_t;
+
+typedef struct int16x4x2_t
+{
+  int16x4_t val[2];
+} int16x4x2_t;
+
+typedef struct int16x8x2_t
+{
+  int16x8_t val[2];
+} int16x8x2_t;
+
+typedef struct int32x2x2_t
+{
+  int32x2_t val[2];
+} int32x2x2_t;
+
+typedef struct int32x4x2_t
+{
+  int32x4_t val[2];
+} int32x4x2_t;
+
+typedef struct int64x1x2_t
+{
+  int64x1_t val[2];
+} int64x1x2_t;
+
+typedef struct int64x2x2_t
+{
+  int64x2_t val[2];
+} int64x2x2_t;
+
+typedef struct uint8x8x2_t
+{
+  uint8x8_t val[2];
+} uint8x8x2_t;
+
+typedef struct uint8x16x2_t
+{
+  uint8x16_t val[2];
+} uint8x16x2_t;
+
+typedef struct uint16x4x2_t
+{
+  uint16x4_t val[2];
+} uint16x4x2_t;
+
+typedef struct uint16x8x2_t
+{
+  uint16x8_t val[2];
+} uint16x8x2_t;
+
+typedef struct uint32x2x2_t
+{
+  uint32x2_t val[2];
+} uint32x2x2_t;
+
+typedef struct uint32x4x2_t
+{
+  uint32x4_t val[2];
+} uint32x4x2_t;
+
+typedef struct uint64x1x2_t
+{
+  uint64x1_t val[2];
+} uint64x1x2_t;
+
+typedef struct uint64x2x2_t
+{
+  uint64x2_t val[2];
+} uint64x2x2_t;
+
+typedef struct float32x2x2_t
+{
+  float32x2_t val[2];
+} float32x2x2_t;
+
+typedef struct float32x4x2_t
+{
+  float32x4_t val[2];
+} float32x4x2_t;
+
+typedef struct float64x2x2_t
+{
+  float64x2_t val[2];
+} float64x2x2_t;
+
+typedef struct float64x1x2_t
+{
+  float64x1_t val[2];
+} float64x1x2_t;
+
+typedef struct poly8x8x2_t
+{
+  poly8x8_t val[2];
+} poly8x8x2_t;
+
+typedef struct poly8x16x2_t
+{
+  poly8x16_t val[2];
+} poly8x16x2_t;
+
+typedef struct poly16x4x2_t
+{
+  poly16x4_t val[2];
+} poly16x4x2_t;
+
+typedef struct poly16x8x2_t
+{
+  poly16x8_t val[2];
+} poly16x8x2_t;
+
+typedef struct int8x8x3_t
+{
+  int8x8_t val[3];
+} int8x8x3_t;
+
+typedef struct int8x16x3_t
+{
+  int8x16_t val[3];
+} int8x16x3_t;
+
+typedef struct int16x4x3_t
+{
+  int16x4_t val[3];
+} int16x4x3_t;
+
+typedef struct int16x8x3_t
+{
+  int16x8_t val[3];
+} int16x8x3_t;
+
+typedef struct int32x2x3_t
+{
+  int32x2_t val[3];
+} int32x2x3_t;
+
+typedef struct int32x4x3_t
+{
+  int32x4_t val[3];
+} int32x4x3_t;
+
+typedef struct int64x1x3_t
+{
+  int64x1_t val[3];
+} int64x1x3_t;
+
+typedef struct int64x2x3_t
+{
+  int64x2_t val[3];
+} int64x2x3_t;
+
+typedef struct uint8x8x3_t
+{
+  uint8x8_t val[3];
+} uint8x8x3_t;
+
+typedef struct uint8x16x3_t
+{
+  uint8x16_t val[3];
+} uint8x16x3_t;
+
+typedef struct uint16x4x3_t
+{
+  uint16x4_t val[3];
+} uint16x4x3_t;
+
+typedef struct uint16x8x3_t
+{
+  uint16x8_t val[3];
+} uint16x8x3_t;
+
+typedef struct uint32x2x3_t
+{
+  uint32x2_t val[3];
+} uint32x2x3_t;
+
+typedef struct uint32x4x3_t
+{
+  uint32x4_t val[3];
+} uint32x4x3_t;
+
+typedef struct uint64x1x3_t
+{
+  uint64x1_t val[3];
+} uint64x1x3_t;
+
+typedef struct uint64x2x3_t
+{
+  uint64x2_t val[3];
+} uint64x2x3_t;
+
+typedef struct float32x2x3_t
+{
+  float32x2_t val[3];
+} float32x2x3_t;
+
+typedef struct float32x4x3_t
+{
+  float32x4_t val[3];
+} float32x4x3_t;
+
+typedef struct float64x2x3_t
+{
+  float64x2_t val[3];
+} float64x2x3_t;
+
+typedef struct float64x1x3_t
+{
+  float64x1_t val[3];
+} float64x1x3_t;
+
+typedef struct poly8x8x3_t
+{
+  poly8x8_t val[3];
+} poly8x8x3_t;
+
+typedef struct poly8x16x3_t
+{
+  poly8x16_t val[3];
+} poly8x16x3_t;
+
+typedef struct poly16x4x3_t
+{
+  poly16x4_t val[3];
+} poly16x4x3_t;
+
+typedef struct poly16x8x3_t
+{
+  poly16x8_t val[3];
+} poly16x8x3_t;
+
+typedef struct int8x8x4_t
+{
+  int8x8_t val[4];
+} int8x8x4_t;
+
+typedef struct int8x16x4_t
+{
+  int8x16_t val[4];
+} int8x16x4_t;
+
+typedef struct int16x4x4_t
+{
+  int16x4_t val[4];
+} int16x4x4_t;
+
+typedef struct int16x8x4_t
+{
+  int16x8_t val[4];
+} int16x8x4_t;
+
+typedef struct int32x2x4_t
+{
+  int32x2_t val[4];
+} int32x2x4_t;
+
+typedef struct int32x4x4_t
+{
+  int32x4_t val[4];
+} int32x4x4_t;
+
+typedef struct int64x1x4_t
+{
+  int64x1_t val[4];
+} int64x1x4_t;
+
+typedef struct int64x2x4_t
+{
+  int64x2_t val[4];
+} int64x2x4_t;
+
+typedef struct uint8x8x4_t
+{
+  uint8x8_t val[4];
+} uint8x8x4_t;
+
+typedef struct uint8x16x4_t
+{
+  uint8x16_t val[4];
+} uint8x16x4_t;
+
+typedef struct uint16x4x4_t
+{
+  uint16x4_t val[4];
+} uint16x4x4_t;
+
+typedef struct uint16x8x4_t
+{
+  uint16x8_t val[4];
+} uint16x8x4_t;
+
+typedef struct uint32x2x4_t
+{
+  uint32x2_t val[4];
+} uint32x2x4_t;
+
+typedef struct uint32x4x4_t
+{
+  uint32x4_t val[4];
+} uint32x4x4_t;
+
+typedef struct uint64x1x4_t
+{
+  uint64x1_t val[4];
+} uint64x1x4_t;
+
+typedef struct uint64x2x4_t
+{
+  uint64x2_t val[4];
+} uint64x2x4_t;
+
+typedef struct float32x2x4_t
+{
+  float32x2_t val[4];
+} float32x2x4_t;
+
+typedef struct float32x4x4_t
+{
+  float32x4_t val[4];
+} float32x4x4_t;
+
+typedef struct float64x2x4_t
+{
+  float64x2_t val[4];
+} float64x2x4_t;
+
+typedef struct float64x1x4_t
+{
+  float64x1_t val[4];
+} float64x1x4_t;
+
+typedef struct poly8x8x4_t
+{
+  poly8x8_t val[4];
+} poly8x8x4_t;
+
+typedef struct poly8x16x4_t
+{
+  poly8x16_t val[4];
+} poly8x16x4_t;
+
+typedef struct poly16x4x4_t
+{
+  poly16x4_t val[4];
+} poly16x4x4_t;
+
+typedef struct poly16x8x4_t
+{
+  poly16x8_t val[4];
+} poly16x8x4_t;
+
+
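A minimal sketch of how the types above fit together (illustrative only, not part of the header; the function name is hypothetical): each intNxMxK_t struct simply wraps K vectors of the corresponding element type in its val array.

  /* Illustrative only: build a pair of 128-bit vectors of four
     32-bit signed integers.  */
  int32x4x2_t
  make_pair (int32x4_t lo, int32x4_t hi)
  {
    int32x4x2_t r;
    r.val[0] = lo;
    r.val[1] = hi;
    return r;
  }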
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vadd_f32 (float32x2_t __a, float32x2_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vadd_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vadd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddq_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vaddq_f32 (float32x4_t __a, float32x4_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vaddq_f64 (float64x2_t __a, float64x2_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+  return __a + __b;
+}
+
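A quick usage sketch for the basic adds above (illustrative only; hypothetical function name, assuming this header is included):

  /* vaddq_s32 is a lane-wise add of two vectors of four 32-bit
     signed integers.  */
  int32x4_t
  add4 (int32x4_t a, int32x4_t b)
  {
    return vaddq_s32 (a, b);   /* {a0+b0, a1+b1, a2+b2, a3+b3} */
  }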
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddl_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return (int16x8_t) __builtin_aarch64_saddlv8qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddl_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return (int32x4_t) __builtin_aarch64_saddlv4hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddl_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return (int64x2_t) __builtin_aarch64_saddlv2si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddl_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_uaddlv8qi ((int8x8_t) __a,
+                                                  (int8x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddl_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_uaddlv4hi ((int16x4_t) __a,
+                                                  (int16x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddl_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_uaddlv2si ((int32x2_t) __a,
+                                                  (int32x2_t) __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddl_high_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return (int16x8_t) __builtin_aarch64_saddl2v16qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddl_high_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return (int32x4_t) __builtin_aarch64_saddl2v8hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddl_high_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return (int64x2_t) __builtin_aarch64_saddl2v4si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddl_high_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_uaddl2v16qi ((int8x16_t) __a,
+                                                    (int8x16_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddl_high_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_uaddl2v8hi ((int16x8_t) __a,
+                                                   (int16x8_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddl_high_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_uaddl2v4si ((int32x4_t) __a,
+                                                   (int32x4_t) __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddw_s8 (int16x8_t __a, int8x8_t __b)
+{
+  return (int16x8_t) __builtin_aarch64_saddwv8qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddw_s16 (int32x4_t __a, int16x4_t __b)
+{
+  return (int32x4_t) __builtin_aarch64_saddwv4hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddw_s32 (int64x2_t __a, int32x2_t __b)
+{
+  return (int64x2_t) __builtin_aarch64_saddwv2si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddw_u8 (uint16x8_t __a, uint8x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_uaddwv8qi ((int16x8_t) __a,
+                                                  (int8x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddw_u16 (uint32x4_t __a, uint16x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_uaddwv4hi ((int32x4_t) __a,
+                                                  (int16x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddw_u32 (uint64x2_t __a, uint32x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_uaddwv2si ((int64x2_t) __a,
+                                                  (int32x2_t) __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddw_high_s8 (int16x8_t __a, int8x16_t __b)
+{
+  return (int16x8_t) __builtin_aarch64_saddw2v16qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddw_high_s16 (int32x4_t __a, int16x8_t __b)
+{
+  return (int32x4_t) __builtin_aarch64_saddw2v8hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddw_high_s32 (int64x2_t __a, int32x4_t __b)
+{
+  return (int64x2_t) __builtin_aarch64_saddw2v4si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddw_high_u8 (uint16x8_t __a, uint8x16_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_uaddw2v16qi ((int16x8_t) __a,
+                                                    (int8x16_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddw_high_u16 (uint32x4_t __a, uint16x8_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_uaddw2v8hi ((int32x4_t) __a,
+                                                   (int16x8_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddw_high_u32 (uint64x2_t __a, uint32x4_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_uaddw2v4si ((int64x2_t) __a,
+                                                   (int32x4_t) __b);
+}
+
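An illustrative sketch of the widening forms above (not part of the header; hypothetical function name): vaddl widens each lane before adding, so the sums cannot wrap, vaddw adds a narrow vector into an already-wide one, and the _high variants take the upper halves of 128-bit inputs.

  /* Widening add: eight 8-bit lanes in, eight 16-bit sums out.  */
  uint16x8_t
  sum_bytes_wide (uint8x8_t a, uint8x8_t b)
  {
    return vaddl_u8 (a, b);
  }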
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vhadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return (int8x8_t) __builtin_aarch64_shaddv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vhadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return (int16x4_t) __builtin_aarch64_shaddv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vhadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return (int32x2_t) __builtin_aarch64_shaddv2si (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vhadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_uhaddv8qi ((int8x8_t) __a,
+                                                 (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vhadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_uhaddv4hi ((int16x4_t) __a,
+                                                  (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vhadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_uhaddv2si ((int32x2_t) __a,
+                                                  (int32x2_t) __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vhaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return (int8x16_t) __builtin_aarch64_shaddv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vhaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return (int16x8_t) __builtin_aarch64_shaddv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vhaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return (int32x4_t) __builtin_aarch64_shaddv4si (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_uhaddv16qi ((int8x16_t) __a,
+                                                   (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_uhaddv8hi ((int16x8_t) __a,
+                                                  (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_uhaddv4si ((int32x4_t) __a,
+                                                  (int32x4_t) __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrhadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return (int8x8_t) __builtin_aarch64_srhaddv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrhadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return (int16x4_t) __builtin_aarch64_srhaddv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrhadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return (int32x2_t) __builtin_aarch64_srhaddv2si (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrhadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_urhaddv8qi ((int8x8_t) __a,
+                                                  (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrhadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_urhaddv4hi ((int16x4_t) __a,
+                                                   (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrhadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_urhaddv2si ((int32x2_t) __a,
+                                                   (int32x2_t) __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrhaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return (int8x16_t) __builtin_aarch64_srhaddv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return (int16x8_t) __builtin_aarch64_srhaddv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrhaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return (int32x4_t) __builtin_aarch64_srhaddv4si (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_urhaddv16qi ((int8x16_t) __a,
+                                                    (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_urhaddv8hi ((int16x8_t) __a,
+                                                   (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_urhaddv4si ((int32x4_t) __a,
+                                                   (int32x4_t) __b);
+}
+
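For the halving adds above, a brief illustrative sketch (hypothetical function name): vhadd computes (a + b) >> 1 per lane in a widened intermediate, so it cannot overflow, while vrhadd is the rounding form, (a + b + 1) >> 1.

  /* Truncating lane-wise average of two byte vectors.  */
  uint8x8_t
  average_truncating (uint8x8_t a, uint8x8_t b)
  {
    return vhadd_u8 (a, b);
  }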
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vaddhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return (int8x8_t) __builtin_aarch64_addhnv8hi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vaddhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return (int16x4_t) __builtin_aarch64_addhnv4si (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vaddhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return (int32x2_t) __builtin_aarch64_addhnv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vaddhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_addhnv8hi ((int16x8_t) __a,
+                                                 (int16x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vaddhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_addhnv4si ((int32x4_t) __a,
+                                                  (int32x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vaddhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_addhnv2di ((int64x2_t) __a,
+                                                  (int64x2_t) __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vraddhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return (int8x8_t) __builtin_aarch64_raddhnv8hi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vraddhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return (int16x4_t) __builtin_aarch64_raddhnv4si (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vraddhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return (int32x2_t) __builtin_aarch64_raddhnv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vraddhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_raddhnv8hi ((int16x8_t) __a,
+                                                  (int16x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vraddhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_raddhnv4si ((int32x4_t) __a,
+                                                   (int32x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vraddhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_raddhnv2di ((int64x2_t) __a,
+                                                   (int64x2_t) __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vaddhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+  return (int8x16_t) __builtin_aarch64_addhn2v8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+  return (int16x8_t) __builtin_aarch64_addhn2v4si (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
+{
+  return (int32x4_t) __builtin_aarch64_addhn2v2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaddhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+  return (uint8x16_t) __builtin_aarch64_addhn2v8hi ((int8x8_t) __a,
+                                                   (int16x8_t) __b,
+                                                   (int16x8_t) __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+  return (uint16x8_t) __builtin_aarch64_addhn2v4si ((int16x4_t) __a,
+                                                   (int32x4_t) __b,
+                                                   (int32x4_t) __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
+{
+  return (uint32x4_t) __builtin_aarch64_addhn2v2di ((int32x2_t) __a,
+                                                   (int64x2_t) __b,
+                                                   (int64x2_t) __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vraddhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+  return (int8x16_t) __builtin_aarch64_raddhn2v8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vraddhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+  return (int16x8_t) __builtin_aarch64_raddhn2v4si (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vraddhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
+{
+  return (int32x4_t) __builtin_aarch64_raddhn2v2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vraddhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+  return (uint8x16_t) __builtin_aarch64_raddhn2v8hi ((int8x8_t) __a,
+                                                    (int16x8_t) __b,
+                                                    (int16x8_t) __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vraddhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+  return (uint16x8_t) __builtin_aarch64_raddhn2v4si ((int16x4_t) __a,
+                                                    (int32x4_t) __b,
+                                                    (int32x4_t) __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vraddhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
+{
+  return (uint32x4_t) __builtin_aarch64_raddhn2v2di ((int32x2_t) __a,
+                                                    (int64x2_t) __b,
+                                                    (int64x2_t) __c);
+}
+
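An illustrative sketch of the narrowing adds above (hypothetical function name): vaddhn adds two wide vectors and keeps only the high half of each lane's sum, vraddhn rounds first, and the _high forms pack the narrowed result into the upper half of a 128-bit vector on top of an existing low half.

  /* Add 16-bit lanes, keep the upper 8 bits of each sum.  */
  uint8x8_t
  add_keep_high_halves (uint16x8_t a, uint16x8_t b)
  {
    return vaddhn_u16 (a, b);
  }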
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vdiv_f32 (float32x2_t __a, float32x2_t __b)
+{
+  return __a / __b;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vdivq_f32 (float32x4_t __a, float32x4_t __b)
+{
+  return __a / __b;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vdivq_f64 (float64x2_t __a, float64x2_t __b)
+{
+  return __a / __b;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmul_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmul_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmul_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmul_f32 (float32x2_t __a, float32x2_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmul_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmul_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmul_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vmul_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+  return (poly8x8_t) __builtin_aarch64_pmulv8qi ((int8x8_t) __a,
+                                                (int8x8_t) __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmulq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmulq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmulq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulq_f32 (float32x4_t __a, float32x4_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmulq_f64 (float64x2_t __a, float64x2_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmulq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmulq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmulq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vmulq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+  return (poly8x16_t) __builtin_aarch64_pmulv16qi ((int8x16_t) __a,
+                                                  (int8x16_t) __b);
+}
+
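A short illustrative note on the multiplies above (hypothetical function name): the vmul/vmulq variants are ordinary lane-wise multiplies, while vmul_p8/vmulq_p8 are polynomial (carry-less) multiplies over the 8-bit poly lanes, keeping the low 8 bits of each product.

  /* Carry-less multiply of each pair of poly8 lanes.  */
  poly8x8_t
  gf2_mul (poly8x8_t a, poly8x8_t b)
  {
    return vmul_p8 (a, b);
  }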
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vand_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return __a & __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vand_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return __a & __b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vand_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return __a & __b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vand_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return __a & __b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vand_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return __a & __b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vand_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return __a & __b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vand_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return __a & __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vand_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return __a & __b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vandq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return __a & __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vandq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return __a & __b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vandq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return __a & __b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vandq_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return __a & __b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vandq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return __a & __b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vandq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return __a & __b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vandq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return __a & __b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vandq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+  return __a & __b;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vorr_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return __a | __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vorr_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return __a | __b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vorr_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return __a | __b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vorr_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return __a | __b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vorr_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return __a | __b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vorr_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return __a | __b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vorr_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return __a | __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vorr_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return __a | __b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vorrq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return __a | __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vorrq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return __a | __b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vorrq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return __a | __b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vorrq_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return __a | __b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vorrq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return __a | __b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vorrq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return __a | __b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vorrq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return __a | __b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vorrq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+  return __a | __b;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+veor_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return __a ^ __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+veor_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return __a ^ __b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+veor_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return __a ^ __b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+veor_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return __a ^ __b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+veor_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return __a ^ __b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+veor_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return __a ^ __b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+veor_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return __a ^ __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+veor_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return __a ^ __b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+veorq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return __a ^ __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+veorq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return __a ^ __b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+veorq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return __a ^ __b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+veorq_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return __a ^ __b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+veorq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return __a ^ __b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+veorq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return __a ^ __b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+veorq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return __a ^ __b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+veorq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+  return __a ^ __b;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vbic_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return __a & ~__b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vbic_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return __a & ~__b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vbic_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return __a & ~__b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vbic_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return __a & ~__b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vbic_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return __a & ~__b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vbic_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return __a & ~__b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vbic_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return __a & ~__b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vbic_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return __a & ~__b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vbicq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return __a & ~__b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vbicq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return __a & ~__b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vbicq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return __a & ~__b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vbicq_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return __a & ~__b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vbicq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return __a & ~__b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vbicq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return __a & ~__b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vbicq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return __a & ~__b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vbicq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+  return __a & ~__b;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vorn_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return __a | ~__b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vorn_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return __a | ~__b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vorn_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return __a | ~__b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vorn_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return __a | ~__b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vorn_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return __a | ~__b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vorn_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return __a | ~__b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vorn_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return __a | ~__b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vorn_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return __a | ~__b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vornq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return __a | ~__b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vornq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return __a | ~__b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vornq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return __a | ~__b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vornq_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return __a | ~__b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vornq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return __a | ~__b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vornq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return __a | ~__b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vornq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return __a | ~__b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vornq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+  return __a | ~__b;
+}
+
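For the bitwise forms above, a minimal illustrative sketch (hypothetical function name): vbic computes a & ~b per lane ("bit clear") and vorn computes a | ~b ("or not"), exactly the C expressions used in their definitions.

  /* Clear in VALUE every bit that is set in MASK.  */
  uint32x2_t
  clear_masked_bits (uint32x2_t value, uint32x2_t mask)
  {
    return vbic_u32 (value, mask);
  }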
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsub_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return __a - __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsub_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return __a - __b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsub_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return __a - __b;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vsub_f32 (float32x2_t __a, float32x2_t __b)
+{
+  return __a - __b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsub_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return __a - __b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsub_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return __a - __b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsub_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return __a - __b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsub_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return __a - __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsub_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return __a - __b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return __a - __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return __a - __b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return __a - __b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubq_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return __a - __b;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vsubq_f32 (float32x4_t __a, float32x4_t __b)
+{
+  return __a - __b;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vsubq_f64 (float64x2_t __a, float64x2_t __b)
+{
+  return __a - __b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return __a - __b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return __a - __b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return __a - __b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+  return __a - __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubl_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return (int16x8_t) __builtin_aarch64_ssublv8qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubl_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return (int32x4_t) __builtin_aarch64_ssublv4hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubl_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return (int64x2_t) __builtin_aarch64_ssublv2si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubl_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_usublv8qi ((int8x8_t) __a,
+                                                  (int8x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubl_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_usublv4hi ((int16x4_t) __a,
+                                                  (int16x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubl_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_usublv2si ((int32x2_t) __a,
+                                                  (int32x2_t) __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubl_high_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return (int16x8_t) __builtin_aarch64_ssubl2v16qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubl_high_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return (int32x4_t) __builtin_aarch64_ssubl2v8hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubl_high_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return (int64x2_t) __builtin_aarch64_ssubl2v4si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubl_high_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_usubl2v16qi ((int8x16_t) __a,
+                                                    (int8x16_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubl_high_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_usubl2v8hi ((int16x8_t) __a,
+                                                   (int16x8_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubl_high_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_usubl2v4si ((int32x4_t) __a,
+                                                   (int32x4_t) __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubw_s8 (int16x8_t __a, int8x8_t __b)
+{
+  return (int16x8_t) __builtin_aarch64_ssubwv8qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubw_s16 (int32x4_t __a, int16x4_t __b)
+{
+  return (int32x4_t) __builtin_aarch64_ssubwv4hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubw_s32 (int64x2_t __a, int32x2_t __b)
+{
+  return (int64x2_t) __builtin_aarch64_ssubwv2si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubw_u8 (uint16x8_t __a, uint8x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_usubwv8qi ((int16x8_t) __a,
+                                                  (int8x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubw_u16 (uint32x4_t __a, uint16x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_usubwv4hi ((int32x4_t) __a,
+                                                  (int16x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubw_u32 (uint64x2_t __a, uint32x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_usubwv2si ((int64x2_t) __a,
+                                                  (int32x2_t) __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubw_high_s8 (int16x8_t __a, int8x16_t __b)
+{
+  return (int16x8_t) __builtin_aarch64_ssubw2v16qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubw_high_s16 (int32x4_t __a, int16x8_t __b)
+{
+  return (int32x4_t) __builtin_aarch64_ssubw2v8hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubw_high_s32 (int64x2_t __a, int32x4_t __b)
+{
+  return (int64x2_t) __builtin_aarch64_ssubw2v4si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubw_high_u8 (uint16x8_t __a, uint8x16_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_usubw2v16qi ((int16x8_t) __a,
+                                                    (int8x16_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubw_high_u16 (uint32x4_t __a, uint16x8_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_usubw2v8hi ((int32x4_t) __a,
+                                                   (int16x8_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubw_high_u32 (uint64x2_t __a, uint32x4_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_usubw2v4si ((int64x2_t) __a,
+                                                   (int32x4_t) __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return (int8x8_t) __builtin_aarch64_sqaddv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return (int16x4_t) __builtin_aarch64_sqaddv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return (int32x2_t) __builtin_aarch64_sqaddv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqadd_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (int64x1_t) __builtin_aarch64_sqadddi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_uqaddv8qi ((int8x8_t) __a,
+                                                 (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_uqaddv4hi ((int16x4_t) __a,
+                                                  (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_uqaddv2si ((int32x2_t) __a,
+                                                  (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqadd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_uqadddi ((int64x1_t) __a,
+                                                (int64x1_t) __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return (int8x16_t) __builtin_aarch64_sqaddv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return (int16x8_t) __builtin_aarch64_sqaddv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return (int32x4_t) __builtin_aarch64_sqaddv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqaddq_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return (int64x2_t) __builtin_aarch64_sqaddv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_uqaddv16qi ((int8x16_t) __a,
+                                                   (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_uqaddv8hi ((int16x8_t) __a,
+                                                  (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_uqaddv4si ((int32x4_t) __a,
+                                                  (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqaddq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_uqaddv2di ((int64x2_t) __a,
+                                                  (int64x2_t) __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqsub_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return (int8x8_t) __builtin_aarch64_sqsubv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqsub_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return (int16x4_t) __builtin_aarch64_sqsubv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqsub_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return (int32x2_t) __builtin_aarch64_sqsubv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqsub_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (int64x1_t) __builtin_aarch64_sqsubdi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqsub_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_uqsubv8qi ((int8x8_t) __a,
+                                                 (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqsub_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_uqsubv4hi ((int16x4_t) __a,
+                                                  (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqsub_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_uqsubv2si ((int32x2_t) __a,
+                                                  (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqsub_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_uqsubdi ((int64x1_t) __a,
+                                                (int64x1_t) __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return (int8x16_t) __builtin_aarch64_sqsubv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return (int16x8_t) __builtin_aarch64_sqsubv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return (int32x4_t) __builtin_aarch64_sqsubv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqsubq_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return (int64x2_t) __builtin_aarch64_sqsubv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_uqsubv16qi ((int8x16_t) __a,
+                                                   (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_uqsubv8hi ((int16x8_t) __a,
+                                                  (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_uqsubv4si ((int32x4_t) __a,
+                                                  (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqsubq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_uqsubv2di ((int64x2_t) __a,
+                                                  (int64x2_t) __b);
+}
+
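The vqsub_* / vqsubq_* set is the subtraction counterpart (SQSUB/UQSUB); for the unsigned forms the result clamps at zero rather than wrapping around. A short sketch under the same assumptions as the example above:

#include <arm_neon.h>
#include <stdio.h>

int
main (void)
{
  uint8x8_t a = vcreate_u8 (0x0a0a0a0a0a0a0a0aULL);   /* lanes = 10 */
  uint8x8_t b = vcreate_u8 (0x1414141414141414ULL);   /* lanes = 20 */

  /* 10 - 20 underflows; the saturating subtract clamps each lane to 0
     instead of wrapping to 246.  */
  uint8x8_t c = vqsub_u8 (a, b);

  printf ("%d\n", vget_lane_u8 (c, 0));   /* prints 0 */
  return 0;
}
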
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqneg_s8 (int8x8_t __a)
+{
+  return (int8x8_t) __builtin_aarch64_sqnegv8qi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqneg_s16 (int16x4_t __a)
+{
+  return (int16x4_t) __builtin_aarch64_sqnegv4hi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqneg_s32 (int32x2_t __a)
+{
+  return (int32x2_t) __builtin_aarch64_sqnegv2si (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqnegq_s8 (int8x16_t __a)
+{
+  return (int8x16_t) __builtin_aarch64_sqnegv16qi (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqnegq_s16 (int16x8_t __a)
+{
+  return (int16x8_t) __builtin_aarch64_sqnegv8hi (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqnegq_s32 (int32x4_t __a)
+{
+  return (int32x4_t) __builtin_aarch64_sqnegv4si (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqabs_s8 (int8x8_t __a)
+{
+  return (int8x8_t) __builtin_aarch64_sqabsv8qi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqabs_s16 (int16x4_t __a)
+{
+  return (int16x4_t) __builtin_aarch64_sqabsv4hi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqabs_s32 (int32x2_t __a)
+{
+  return (int32x2_t) __builtin_aarch64_sqabsv2si (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqabsq_s8 (int8x16_t __a)
+{
+  return (int8x16_t) __builtin_aarch64_sqabsv16qi (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqabsq_s16 (int16x8_t __a)
+{
+  return (int16x8_t) __builtin_aarch64_sqabsv8hi (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqabsq_s32 (int32x4_t __a)
+{
+  return (int32x4_t) __builtin_aarch64_sqabsv4si (__a);
+}
+
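vqneg_* and vqabs_* cover the one corner case where plain negation and absolute value misbehave: the most negative value, whose negation does not fit in the type. The saturating forms clamp it to the maximum. Illustrative sketch, same assumptions:

#include <arm_neon.h>
#include <stdio.h>

int
main (void)
{
  int8x8_t a = vcreate_s8 (0x8080808080808080ULL);   /* lanes = -128 */

  /* Two's-complement negation of -128 wraps back to -128; SQNEG and
     SQABS clamp the result to 127 instead.  */
  printf ("%d %d\n",
          vget_lane_s8 (vqneg_s8 (a), 0),    /* 127 */
          vget_lane_s8 (vqabs_s8 (a), 0));   /* 127 */
  return 0;
}
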
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return (int16x4_t) __builtin_aarch64_sqdmulhv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return (int32x2_t) __builtin_aarch64_sqdmulhv2si (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return (int16x8_t) __builtin_aarch64_sqdmulhv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return (int32x4_t) __builtin_aarch64_sqdmulhv4si (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return (int16x4_t) __builtin_aarch64_sqrdmulhv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return (int32x2_t) __builtin_aarch64_sqrdmulhv2si (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return (int16x8_t) __builtin_aarch64_sqrdmulhv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return (int32x4_t) __builtin_aarch64_sqrdmulhv4si (__a, __b);
+}
+
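vqdmulh_* returns the saturated high half of the doubled product, roughly (2 * a * b) >> 16 for 16-bit lanes, which is the usual Q15 fixed-point multiply; vqrdmulh_* additionally adds a rounding constant before the shift. A worked sketch, illustrative only, showing the Q15 reading and the single input pair that saturates:

#include <arm_neon.h>
#include <stdio.h>

int
main (void)
{
  /* All four int16 lanes = 0x4000, i.e. 0.5 in Q15 fixed point.  */
  int16x4_t half = vcreate_s16 (0x4000400040004000ULL);
  /* All four lanes = INT16_MIN, the only pair that saturates.  */
  int16x4_t min  = vcreate_s16 (0x8000800080008000ULL);

  /* 0.5 * 0.5 = 0.25: (2 * 0x4000 * 0x4000) >> 16 = 0x2000.  */
  printf ("%d\n", vget_lane_s16 (vqdmulh_s16 (half, half), 0)); /* 8192 */

  /* 2 * (-32768) * (-32768) does not fit in int32_t, so the doubled
     product saturates and the returned high half is INT16_MAX.  */
  printf ("%d\n", vget_lane_s16 (vqdmulh_s16 (min, min), 0));   /* 32767 */
  return 0;
}
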
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vcreate_s8 (uint64_t __a)
+{
+  return (int8x8_t) __a;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vcreate_s16 (uint64_t __a)
+{
+  return (int16x4_t) __a;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcreate_s32 (uint64_t __a)
+{
+  return (int32x2_t) __a;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vcreate_s64 (uint64_t __a)
+{
+  return (int64x1_t) __a;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcreate_f32 (uint64_t __a)
+{
+  return (float32x2_t) __a;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcreate_u8 (uint64_t __a)
+{
+  return (uint8x8_t) __a;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcreate_u16 (uint64_t __a)
+{
+  return (uint16x4_t) __a;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcreate_u32 (uint64_t __a)
+{
+  return (uint32x2_t) __a;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcreate_u64 (uint64_t __a)
+{
+  return (uint64x1_t) __a;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vcreate_f64 (uint64_t __a)
+{
+  return (float64x1_t) __builtin_aarch64_createdf (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vcreate_p8 (uint64_t __a)
+{
+  return (poly8x8_t) __a;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vcreate_p16 (uint64_t __a)
+{
+  return (poly16x4_t) __a;
+}
+
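The vcreate_* functions build a 64-bit vector by reinterpreting the bit pattern of a scalar uint64_t: lane j of an n-bit element type occupies bits [n*j + n - 1 : n*j], so lane 0 comes from the low-order bits. A small sketch, same assumptions as above:

#include <arm_neon.h>
#include <stdio.h>

int
main (void)
{
  /* Byte j of the constant becomes lane j of the vector.  */
  uint8x8_t v = vcreate_u8 (0x0706050403020100ULL);

  printf ("%d %d\n",
          vget_lane_u8 (v, 0),    /* 0, taken from bits [7:0]   */
          vget_lane_u8 (v, 7));   /* 7, taken from bits [63:56] */
  return 0;
}
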
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vget_lane_s8 (int8x8_t __a, const int __b)
+{
+  return (int8_t) __builtin_aarch64_get_lane_signedv8qi (__a, __b);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vget_lane_s16 (int16x4_t __a, const int __b)
+{
+  return (int16_t) __builtin_aarch64_get_lane_signedv4hi (__a, __b);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vget_lane_s32 (int32x2_t __a, const int __b)
+{
+  return (int32_t) __builtin_aarch64_get_lane_signedv2si (__a, __b);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vget_lane_f32 (float32x2_t __a, const int __b)
+{
+  return (float32_t) __builtin_aarch64_get_lanev2sf (__a, __b);
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vget_lane_u8 (uint8x8_t __a, const int __b)
+{
+  return (uint8_t) __builtin_aarch64_get_lane_unsignedv8qi ((int8x8_t) __a,
+                                                           __b);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vget_lane_u16 (uint16x4_t __a, const int __b)
+{
+  return (uint16_t) __builtin_aarch64_get_lane_unsignedv4hi ((int16x4_t) __a,
+                                                            __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vget_lane_u32 (uint32x2_t __a, const int __b)
+{
+  return (uint32_t) __builtin_aarch64_get_lane_unsignedv2si ((int32x2_t) __a,
+                                                            __b);
+}
+
+__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
+vget_lane_p8 (poly8x8_t __a, const int __b)
+{
+  return (poly8_t) __builtin_aarch64_get_lane_unsignedv8qi ((int8x8_t) __a,
+                                                           __b);
+}
+
+__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
+vget_lane_p16 (poly16x4_t __a, const int __b)
+{
+  return (poly16_t) __builtin_aarch64_get_lane_unsignedv4hi ((int16x4_t) __a,
+                                                            __b);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vget_lane_s64 (int64x1_t __a, const int __b)
+{
+  return (int64_t) __builtin_aarch64_get_lanedi (__a, __b);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vget_lane_u64 (uint64x1_t __a, const int __b)
+{
+  return (uint64_t) __builtin_aarch64_get_lanedi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vgetq_lane_s8 (int8x16_t __a, const int __b)
+{
+  return (int8_t) __builtin_aarch64_get_lane_signedv16qi (__a, __b);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vgetq_lane_s16 (int16x8_t __a, const int __b)
+{
+  return (int16_t) __builtin_aarch64_get_lane_signedv8hi (__a, __b);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vgetq_lane_s32 (int32x4_t __a, const int __b)
+{
+  return (int32_t) __builtin_aarch64_get_lane_signedv4si (__a, __b);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vgetq_lane_f32 (float32x4_t __a, const int __b)
+{
+  return (float32_t) __builtin_aarch64_get_lanev4sf (__a, __b);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vgetq_lane_f64 (float64x2_t __a, const int __b)
+{
+  return (float64_t) __builtin_aarch64_get_lanev2df (__a, __b);
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vgetq_lane_u8 (uint8x16_t __a, const int __b)
+{
+  return (uint8_t) __builtin_aarch64_get_lane_unsignedv16qi ((int8x16_t) __a,
+                                                            __b);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vgetq_lane_u16 (uint16x8_t __a, const int __b)
+{
+  return (uint16_t) __builtin_aarch64_get_lane_unsignedv8hi ((int16x8_t) __a,
+                                                            __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vgetq_lane_u32 (uint32x4_t __a, const int __b)
+{
+  return (uint32_t) __builtin_aarch64_get_lane_unsignedv4si ((int32x4_t) __a,
+                                                            __b);
+}
+
+__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
+vgetq_lane_p8 (poly8x16_t __a, const int __b)
+{
+  return (poly8_t) __builtin_aarch64_get_lane_unsignedv16qi ((int8x16_t) __a,
+                                                            __b);
+}
+
+__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
+vgetq_lane_p16 (poly16x8_t __a, const int __b)
+{
+  return (poly16_t) __builtin_aarch64_get_lane_unsignedv8hi ((int16x8_t) __a,
+                                                            __b);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vgetq_lane_s64 (int64x2_t __a, const int __b)
+{
+  return __builtin_aarch64_get_lane_unsignedv2di (__a, __b);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vgetq_lane_u64 (uint64x2_t __a, const int __b)
+{
+  return (uint64_t) __builtin_aarch64_get_lane_unsignedv2di ((int64x2_t) __a,
+                                                            __b);
+}
+
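The vget_lane_* / vgetq_lane_* accessors forward to the get_lane builtins; the const int argument is the lane number and is expected to be a compile-time constant in the range 0 .. lanes-1, since it ends up encoded in the instruction. Sketch, same assumptions:

#include <arm_neon.h>
#include <inttypes.h>
#include <stdio.h>

int
main (void)
{
  uint32x2_t v = vcreate_u32 (0xdeadbeef00000001ULL);

  /* Lane indices here are literals; a run-time variable index is not
     what these accessors are meant for.  */
  printf ("%" PRIx32 " %" PRIx32 "\n",
          vget_lane_u32 (v, 0),    /* 1        */
          vget_lane_u32 (v, 1));   /* deadbeef */
  return 0;
}
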
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s8 (int8x8_t __a)
+{
+  return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv8qi (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s16 (int16x4_t __a)
+{
+  return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv4hi (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s32 (int32x2_t __a)
+{
+  return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv2si (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s64 (int64x1_t __a)
+{
+  return (poly8x8_t) __builtin_aarch64_reinterpretv8qidi (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_f32 (float32x2_t __a)
+{
+  return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv2sf (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u8 (uint8x8_t __a)
+{
+  return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u16 (uint16x4_t __a)
+{
+  return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u32 (uint32x2_t __a)
+{
+  return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u64 (uint64x1_t __a)
+{
+  return (poly8x8_t) __builtin_aarch64_reinterpretv8qidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_p16 (poly16x4_t __a)
+{
+  return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s8 (int8x16_t __a)
+{
+  return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv16qi (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s16 (int16x8_t __a)
+{
+  return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv8hi (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s32 (int32x4_t __a)
+{
+  return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv4si (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s64 (int64x2_t __a)
+{
+  return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv2di (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_f32 (float32x4_t __a)
+{
+  return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv4sf (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u8 (uint8x16_t __a)
+{
+  return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv16qi ((int8x16_t)
+                                                              __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u16 (uint16x8_t __a)
+{
+  return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t)
+                                                             __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u32 (uint32x4_t __a)
+{
+  return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv4si ((int32x4_t)
+                                                             __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u64 (uint64x2_t __a)
+{
+  return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv2di ((int64x2_t)
+                                                             __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_p16 (poly16x8_t __a)
+{
+  return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t)
+                                                             __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s8 (int8x8_t __a)
+{
+  return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv8qi (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s16 (int16x4_t __a)
+{
+  return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv4hi (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s32 (int32x2_t __a)
+{
+  return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv2si (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s64 (int64x1_t __a)
+{
+  return (poly16x4_t) __builtin_aarch64_reinterpretv4hidi (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_f32 (float32x2_t __a)
+{
+  return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv2sf (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u8 (uint8x8_t __a)
+{
+  return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u16 (uint16x4_t __a)
+{
+  return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u32 (uint32x2_t __a)
+{
+  return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u64 (uint64x1_t __a)
+{
+  return (poly16x4_t) __builtin_aarch64_reinterpretv4hidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_p8 (poly8x8_t __a)
+{
+  return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s8 (int8x16_t __a)
+{
+  return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s16 (int16x8_t __a)
+{
+  return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv8hi (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s32 (int32x4_t __a)
+{
+  return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s64 (int64x2_t __a)
+{
+  return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_f32 (float32x4_t __a)
+{
+  return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv4sf (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u8 (uint8x16_t __a)
+{
+  return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t)
+                                                             __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u16 (uint16x8_t __a)
+{
+  return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u32 (uint32x4_t __a)
+{
+  return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u64 (uint64x2_t __a)
+{
+  return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_p8 (poly8x16_t __a)
+{
+  return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t)
+                                                             __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s8 (int8x8_t __a)
+{
+  return (float32x2_t) __builtin_aarch64_reinterpretv2sfv8qi (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s16 (int16x4_t __a)
+{
+  return (float32x2_t) __builtin_aarch64_reinterpretv2sfv4hi (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s32 (int32x2_t __a)
+{
+  return (float32x2_t) __builtin_aarch64_reinterpretv2sfv2si (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s64 (int64x1_t __a)
+{
+  return (float32x2_t) __builtin_aarch64_reinterpretv2sfdi (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u8 (uint8x8_t __a)
+{
+  return (float32x2_t) __builtin_aarch64_reinterpretv2sfv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u16 (uint16x4_t __a)
+{
+  return (float32x2_t) __builtin_aarch64_reinterpretv2sfv4hi ((int16x4_t)
+                                                             __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u32 (uint32x2_t __a)
+{
+  return (float32x2_t) __builtin_aarch64_reinterpretv2sfv2si ((int32x2_t)
+                                                             __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u64 (uint64x1_t __a)
+{
+  return (float32x2_t) __builtin_aarch64_reinterpretv2sfdi ((int64x1_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_p8 (poly8x8_t __a)
+{
+  return (float32x2_t) __builtin_aarch64_reinterpretv2sfv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_p16 (poly16x4_t __a)
+{
+  return (float32x2_t) __builtin_aarch64_reinterpretv2sfv4hi ((int16x4_t)
+                                                             __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s8 (int8x16_t __a)
+{
+  return (float32x4_t) __builtin_aarch64_reinterpretv4sfv16qi (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s16 (int16x8_t __a)
+{
+  return (float32x4_t) __builtin_aarch64_reinterpretv4sfv8hi (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s32 (int32x4_t __a)
+{
+  return (float32x4_t) __builtin_aarch64_reinterpretv4sfv4si (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s64 (int64x2_t __a)
+{
+  return (float32x4_t) __builtin_aarch64_reinterpretv4sfv2di (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u8 (uint8x16_t __a)
+{
+  return (float32x4_t) __builtin_aarch64_reinterpretv4sfv16qi ((int8x16_t)
+                                                              __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u16 (uint16x8_t __a)
+{
+  return (float32x4_t) __builtin_aarch64_reinterpretv4sfv8hi ((int16x8_t)
+                                                             __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u32 (uint32x4_t __a)
+{
+  return (float32x4_t) __builtin_aarch64_reinterpretv4sfv4si ((int32x4_t)
+                                                             __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u64 (uint64x2_t __a)
+{
+  return (float32x4_t) __builtin_aarch64_reinterpretv4sfv2di ((int64x2_t)
+                                                             __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p8 (poly8x16_t __a)
+{
+  return (float32x4_t) __builtin_aarch64_reinterpretv4sfv16qi ((int8x16_t)
+                                                              __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p16 (poly16x8_t __a)
+{
+  return (float32x4_t) __builtin_aarch64_reinterpretv4sfv8hi ((int16x8_t)
+                                                             __a);
+}
+
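All of the vreinterpret_* / vreinterpretq_* functions change only the static type of the vector; the reinterpret builtins they call perform no data processing on the register contents. Viewing an integer bit pattern as float32 makes that visible, since 0x3f800000 is the IEEE-754 encoding of 1.0f. Illustrative sketch, same assumptions as the earlier examples:

#include <arm_neon.h>
#include <stdio.h>

int
main (void)
{
  /* Both 32-bit lanes hold 0x3f800000, the bit pattern of 1.0f.  */
  int32x2_t bits = vcreate_s32 (0x3f8000003f800000ULL);

  /* No conversion is performed; the same register is simply viewed
     as two floats.  */
  float32x2_t f = vreinterpret_f32_s32 (bits);

  printf ("%f\n", vget_lane_f32 (f, 0));   /* prints 1.000000 */
  return 0;
}
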
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_s8 (int8x8_t __a)
+{
+  return (int64x1_t) __builtin_aarch64_reinterpretdiv8qi (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_s16 (int16x4_t __a)
+{
+  return (int64x1_t) __builtin_aarch64_reinterpretdiv4hi (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_s32 (int32x2_t __a)
+{
+  return (int64x1_t) __builtin_aarch64_reinterpretdiv2si (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_f32 (float32x2_t __a)
+{
+  return (int64x1_t) __builtin_aarch64_reinterpretdiv2sf (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u8 (uint8x8_t __a)
+{
+  return (int64x1_t) __builtin_aarch64_reinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u16 (uint16x4_t __a)
+{
+  return (int64x1_t) __builtin_aarch64_reinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u32 (uint32x2_t __a)
+{
+  return (int64x1_t) __builtin_aarch64_reinterpretdiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u64 (uint64x1_t __a)
+{
+  return (int64x1_t) __builtin_aarch64_reinterpretdidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_p8 (poly8x8_t __a)
+{
+  return (int64x1_t) __builtin_aarch64_reinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_p16 (poly16x4_t __a)
+{
+  return (int64x1_t) __builtin_aarch64_reinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s8 (int8x16_t __a)
+{
+  return (int64x2_t) __builtin_aarch64_reinterpretv2div16qi (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s16 (int16x8_t __a)
+{
+  return (int64x2_t) __builtin_aarch64_reinterpretv2div8hi (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s32 (int32x4_t __a)
+{
+  return (int64x2_t) __builtin_aarch64_reinterpretv2div4si (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_f32 (float32x4_t __a)
+{
+  return (int64x2_t) __builtin_aarch64_reinterpretv2div4sf (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u8 (uint8x16_t __a)
+{
+  return (int64x2_t) __builtin_aarch64_reinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u16 (uint16x8_t __a)
+{
+  return (int64x2_t) __builtin_aarch64_reinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u32 (uint32x4_t __a)
+{
+  return (int64x2_t) __builtin_aarch64_reinterpretv2div4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u64 (uint64x2_t __a)
+{
+  return (int64x2_t) __builtin_aarch64_reinterpretv2div2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p8 (poly8x16_t __a)
+{
+  return (int64x2_t) __builtin_aarch64_reinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p16 (poly16x8_t __a)
+{
+  return (int64x2_t) __builtin_aarch64_reinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s8 (int8x8_t __a)
+{
+  return (uint64x1_t) __builtin_aarch64_reinterpretdiv8qi (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s16 (int16x4_t __a)
+{
+  return (uint64x1_t) __builtin_aarch64_reinterpretdiv4hi (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s32 (int32x2_t __a)
+{
+  return (uint64x1_t) __builtin_aarch64_reinterpretdiv2si (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s64 (int64x1_t __a)
+{
+  return (uint64x1_t) __builtin_aarch64_reinterpretdidi (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_f32 (float32x2_t __a)
+{
+  return (uint64x1_t) __builtin_aarch64_reinterpretdiv2sf (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u8 (uint8x8_t __a)
+{
+  return (uint64x1_t) __builtin_aarch64_reinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u16 (uint16x4_t __a)
+{
+  return (uint64x1_t) __builtin_aarch64_reinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u32 (uint32x2_t __a)
+{
+  return (uint64x1_t) __builtin_aarch64_reinterpretdiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_p8 (poly8x8_t __a)
+{
+  return (uint64x1_t) __builtin_aarch64_reinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_p16 (poly16x4_t __a)
+{
+  return (uint64x1_t) __builtin_aarch64_reinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s8 (int8x16_t __a)
+{
+  return (uint64x2_t) __builtin_aarch64_reinterpretv2div16qi (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s16 (int16x8_t __a)
+{
+  return (uint64x2_t) __builtin_aarch64_reinterpretv2div8hi (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s32 (int32x4_t __a)
+{
+  return (uint64x2_t) __builtin_aarch64_reinterpretv2div4si (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s64 (int64x2_t __a)
+{
+  return (uint64x2_t) __builtin_aarch64_reinterpretv2div2di (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_f32 (float32x4_t __a)
+{
+  return (uint64x2_t) __builtin_aarch64_reinterpretv2div4sf (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u8 (uint8x16_t __a)
+{
+  return (uint64x2_t) __builtin_aarch64_reinterpretv2div16qi ((int8x16_t)
+                                                             __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u16 (uint16x8_t __a)
+{
+  return (uint64x2_t) __builtin_aarch64_reinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u32 (uint32x4_t __a)
+{
+  return (uint64x2_t) __builtin_aarch64_reinterpretv2div4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p8 (poly8x16_t __a)
+{
+  return (uint64x2_t) __builtin_aarch64_reinterpretv2div16qi ((int8x16_t)
+                                                             __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p16 (poly16x8_t __a)
+{
+  return (uint64x2_t) __builtin_aarch64_reinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s16 (int16x4_t __a)
+{
+  return (int8x8_t) __builtin_aarch64_reinterpretv8qiv4hi (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s32 (int32x2_t __a)
+{
+  return (int8x8_t) __builtin_aarch64_reinterpretv8qiv2si (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s64 (int64x1_t __a)
+{
+  return (int8x8_t) __builtin_aarch64_reinterpretv8qidi (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_f32 (float32x2_t __a)
+{
+  return (int8x8_t) __builtin_aarch64_reinterpretv8qiv2sf (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u8 (uint8x8_t __a)
+{
+  return (int8x8_t) __builtin_aarch64_reinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u16 (uint16x4_t __a)
+{
+  return (int8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u32 (uint32x2_t __a)
+{
+  return (int8x8_t) __builtin_aarch64_reinterpretv8qiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u64 (uint64x1_t __a)
+{
+  return (int8x8_t) __builtin_aarch64_reinterpretv8qidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_p8 (poly8x8_t __a)
+{
+  return (int8x8_t) __builtin_aarch64_reinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_p16 (poly16x4_t __a)
+{
+  return (int8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s16 (int16x8_t __a)
+{
+  return (int8x16_t) __builtin_aarch64_reinterpretv16qiv8hi (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s32 (int32x4_t __a)
+{
+  return (int8x16_t) __builtin_aarch64_reinterpretv16qiv4si (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s64 (int64x2_t __a)
+{
+  return (int8x16_t) __builtin_aarch64_reinterpretv16qiv2di (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_f32 (float32x4_t __a)
+{
+  return (int8x16_t) __builtin_aarch64_reinterpretv16qiv4sf (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u8 (uint8x16_t __a)
+{
+  return (int8x16_t) __builtin_aarch64_reinterpretv16qiv16qi ((int8x16_t)
+                                                             __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u16 (uint16x8_t __a)
+{
+  return (int8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u32 (uint32x4_t __a)
+{
+  return (int8x16_t) __builtin_aarch64_reinterpretv16qiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u64 (uint64x2_t __a)
+{
+  return (int8x16_t) __builtin_aarch64_reinterpretv16qiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p8 (poly8x16_t __a)
+{
+  return (int8x16_t) __builtin_aarch64_reinterpretv16qiv16qi ((int8x16_t)
+                                                             __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p16 (poly16x8_t __a)
+{
+  return (int8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s8 (int8x8_t __a)
+{
+  return (int16x4_t) __builtin_aarch64_reinterpretv4hiv8qi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s32 (int32x2_t __a)
+{
+  return (int16x4_t) __builtin_aarch64_reinterpretv4hiv2si (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s64 (int64x1_t __a)
+{
+  return (int16x4_t) __builtin_aarch64_reinterpretv4hidi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_f32 (float32x2_t __a)
+{
+  return (int16x4_t) __builtin_aarch64_reinterpretv4hiv2sf (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u8 (uint8x8_t __a)
+{
+  return (int16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u16 (uint16x4_t __a)
+{
+  return (int16x4_t) __builtin_aarch64_reinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u32 (uint32x2_t __a)
+{
+  return (int16x4_t) __builtin_aarch64_reinterpretv4hiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u64 (uint64x1_t __a)
+{
+  return (int16x4_t) __builtin_aarch64_reinterpretv4hidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_p8 (poly8x8_t __a)
+{
+  return (int16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_p16 (poly16x4_t __a)
+{
+  return (int16x4_t) __builtin_aarch64_reinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s8 (int8x16_t __a)
+{
+  return (int16x8_t) __builtin_aarch64_reinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s32 (int32x4_t __a)
+{
+  return (int16x8_t) __builtin_aarch64_reinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s64 (int64x2_t __a)
+{
+  return (int16x8_t) __builtin_aarch64_reinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_f32 (float32x4_t __a)
+{
+  return (int16x8_t) __builtin_aarch64_reinterpretv8hiv4sf (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u8 (uint8x16_t __a)
+{
+  return (int16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u16 (uint16x8_t __a)
+{
+  return (int16x8_t) __builtin_aarch64_reinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u32 (uint32x4_t __a)
+{
+  return (int16x8_t) __builtin_aarch64_reinterpretv8hiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u64 (uint64x2_t __a)
+{
+  return (int16x8_t) __builtin_aarch64_reinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p8 (poly8x16_t __a)
+{
+  return (int16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p16 (poly16x8_t __a)
+{
+  return (int16x8_t) __builtin_aarch64_reinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s8 (int8x8_t __a)
+{
+  return (int32x2_t) __builtin_aarch64_reinterpretv2siv8qi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s16 (int16x4_t __a)
+{
+  return (int32x2_t) __builtin_aarch64_reinterpretv2siv4hi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s64 (int64x1_t __a)
+{
+  return (int32x2_t) __builtin_aarch64_reinterpretv2sidi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_f32 (float32x2_t __a)
+{
+  return (int32x2_t) __builtin_aarch64_reinterpretv2siv2sf (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u8 (uint8x8_t __a)
+{
+  return (int32x2_t) __builtin_aarch64_reinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u16 (uint16x4_t __a)
+{
+  return (int32x2_t) __builtin_aarch64_reinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u32 (uint32x2_t __a)
+{
+  return (int32x2_t) __builtin_aarch64_reinterpretv2siv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u64 (uint64x1_t __a)
+{
+  return (int32x2_t) __builtin_aarch64_reinterpretv2sidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_p8 (poly8x8_t __a)
+{
+  return (int32x2_t) __builtin_aarch64_reinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_p16 (poly16x4_t __a)
+{
+  return (int32x2_t) __builtin_aarch64_reinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s8 (int8x16_t __a)
+{
+  return (int32x4_t) __builtin_aarch64_reinterpretv4siv16qi (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s16 (int16x8_t __a)
+{
+  return (int32x4_t) __builtin_aarch64_reinterpretv4siv8hi (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s64 (int64x2_t __a)
+{
+  return (int32x4_t) __builtin_aarch64_reinterpretv4siv2di (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_f32 (float32x4_t __a)
+{
+  return (int32x4_t) __builtin_aarch64_reinterpretv4siv4sf (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u8 (uint8x16_t __a)
+{
+  return (int32x4_t) __builtin_aarch64_reinterpretv4siv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u16 (uint16x8_t __a)
+{
+  return (int32x4_t) __builtin_aarch64_reinterpretv4siv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u32 (uint32x4_t __a)
+{
+  return (int32x4_t) __builtin_aarch64_reinterpretv4siv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u64 (uint64x2_t __a)
+{
+  return (int32x4_t) __builtin_aarch64_reinterpretv4siv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_p8 (poly8x16_t __a)
+{
+  return (int32x4_t) __builtin_aarch64_reinterpretv4siv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_p16 (poly16x8_t __a)
+{
+  return (int32x4_t) __builtin_aarch64_reinterpretv4siv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s8 (int8x8_t __a)
+{
+  return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv8qi (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s16 (int16x4_t __a)
+{
+  return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv4hi (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s32 (int32x2_t __a)
+{
+  return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv2si (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s64 (int64x1_t __a)
+{
+  return (uint8x8_t) __builtin_aarch64_reinterpretv8qidi (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_f32 (float32x2_t __a)
+{
+  return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv2sf (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u16 (uint16x4_t __a)
+{
+  return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u32 (uint32x2_t __a)
+{
+  return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u64 (uint64x1_t __a)
+{
+  return (uint8x8_t) __builtin_aarch64_reinterpretv8qidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_p8 (poly8x8_t __a)
+{
+  return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_p16 (poly16x4_t __a)
+{
+  return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s8 (int8x16_t __a)
+{
+  return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv16qi (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s16 (int16x8_t __a)
+{
+  return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv8hi (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s32 (int32x4_t __a)
+{
+  return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv4si (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s64 (int64x2_t __a)
+{
+  return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv2di (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_f32 (float32x4_t __a)
+{
+  return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv4sf (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u16 (uint16x8_t __a)
+{
+  return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t)
+                                                             __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u32 (uint32x4_t __a)
+{
+  return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv4si ((int32x4_t)
+                                                             __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u64 (uint64x2_t __a)
+{
+  return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv2di ((int64x2_t)
+                                                             __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_p8 (poly8x16_t __a)
+{
+  return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv16qi ((int8x16_t)
+                                                              __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_p16 (poly16x8_t __a)
+{
+  return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t)
+                                                             __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s8 (int8x8_t __a)
+{
+  return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv8qi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s16 (int16x4_t __a)
+{
+  return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv4hi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s32 (int32x2_t __a)
+{
+  return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv2si (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s64 (int64x1_t __a)
+{
+  return (uint16x4_t) __builtin_aarch64_reinterpretv4hidi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_f32 (float32x2_t __a)
+{
+  return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv2sf (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u8 (uint8x8_t __a)
+{
+  return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u32 (uint32x2_t __a)
+{
+  return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u64 (uint64x1_t __a)
+{
+  return (uint16x4_t) __builtin_aarch64_reinterpretv4hidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_p8 (poly8x8_t __a)
+{
+  return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_p16 (poly16x4_t __a)
+{
+  return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s8 (int8x16_t __a)
+{
+  return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s16 (int16x8_t __a)
+{
+  return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv8hi (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s32 (int32x4_t __a)
+{
+  return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s64 (int64x2_t __a)
+{
+  return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_f32 (float32x4_t __a)
+{
+  return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv4sf (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_u8 (uint8x16_t __a)
+{
+  return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t)
+                                                             __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_u32 (uint32x4_t __a)
+{
+  return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_u64 (uint64x2_t __a)
+{
+  return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p8 (poly8x16_t __a)
+{
+  return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t)
+                                                             __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p16 (poly16x8_t __a)
+{
+  return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s8 (int8x8_t __a)
+{
+  return (uint32x2_t) __builtin_aarch64_reinterpretv2siv8qi (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s16 (int16x4_t __a)
+{
+  return (uint32x2_t) __builtin_aarch64_reinterpretv2siv4hi (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s32 (int32x2_t __a)
+{
+  return (uint32x2_t) __builtin_aarch64_reinterpretv2siv2si (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s64 (int64x1_t __a)
+{
+  return (uint32x2_t) __builtin_aarch64_reinterpretv2sidi (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_f32 (float32x2_t __a)
+{
+  return (uint32x2_t) __builtin_aarch64_reinterpretv2siv2sf (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u8 (uint8x8_t __a)
+{
+  return (uint32x2_t) __builtin_aarch64_reinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u16 (uint16x4_t __a)
+{
+  return (uint32x2_t) __builtin_aarch64_reinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u64 (uint64x1_t __a)
+{
+  return (uint32x2_t) __builtin_aarch64_reinterpretv2sidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_p8 (poly8x8_t __a)
+{
+  return (uint32x2_t) __builtin_aarch64_reinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_p16 (poly16x4_t __a)
+{
+  return (uint32x2_t) __builtin_aarch64_reinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s8 (int8x16_t __a)
+{
+  return (uint32x4_t) __builtin_aarch64_reinterpretv4siv16qi (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s16 (int16x8_t __a)
+{
+  return (uint32x4_t) __builtin_aarch64_reinterpretv4siv8hi (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s32 (int32x4_t __a)
+{
+  return (uint32x4_t) __builtin_aarch64_reinterpretv4siv4si (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s64 (int64x2_t __a)
+{
+  return (uint32x4_t) __builtin_aarch64_reinterpretv4siv2di (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_f32 (float32x4_t __a)
+{
+  return (uint32x4_t) __builtin_aarch64_reinterpretv4siv4sf (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u8 (uint8x16_t __a)
+{
+  return (uint32x4_t) __builtin_aarch64_reinterpretv4siv16qi ((int8x16_t)
+                                                             __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u16 (uint16x8_t __a)
+{
+  return (uint32x4_t) __builtin_aarch64_reinterpretv4siv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u64 (uint64x2_t __a)
+{
+  return (uint32x4_t) __builtin_aarch64_reinterpretv4siv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p8 (poly8x16_t __a)
+{
+  return (uint32x4_t) __builtin_aarch64_reinterpretv4siv16qi ((int8x16_t)
+                                                             __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p16 (poly16x8_t __a)
+{
+  return (uint32x4_t) __builtin_aarch64_reinterpretv4siv8hi ((int16x8_t) __a);
+}
+
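+/* vcombine places its first argument in the low half of the 128-bit
+   result and its second argument in the high half.  */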
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vcombine_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return (int8x16_t) __builtin_aarch64_combinev8qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vcombine_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return (int16x8_t) __builtin_aarch64_combinev4hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcombine_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return (int32x4_t) __builtin_aarch64_combinev2si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcombine_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (int64x2_t) __builtin_aarch64_combinedi (__a, __b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcombine_f32 (float32x2_t __a, float32x2_t __b)
+{
+  return (float32x4_t) __builtin_aarch64_combinev2sf (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcombine_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_combinev8qi ((int8x8_t) __a,
+                                                    (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcombine_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_combinev4hi ((int16x4_t) __a,
+                                                    (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcombine_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_combinev2si ((int32x2_t) __a,
+                                                    (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcombine_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_combinedi ((int64x1_t) __a,
+                                                  (int64x1_t) __b);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vcombine_f64 (float64x1_t __a, float64x1_t __b)
+{
+  return (float64x2_t) __builtin_aarch64_combinedf (__a, __b);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vcombine_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+  return (poly8x16_t) __builtin_aarch64_combinev8qi ((int8x8_t) __a,
+                                                    (int8x8_t) __b);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vcombine_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+  return (poly16x8_t) __builtin_aarch64_combinev4hi ((int16x4_t) __a,
+                                                    (int16x4_t) __b);
+}
+
+/* Start of temporary inline asm implementations.  */
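+/* Each wrapper follows the same GCC extended-asm pattern: "w" places an
+   operand in an FP/SIMD register, "=w" is the result register, and a
+   "0" matching constraint ties an input to the result register for
+   instructions such as saba and bsl that read their destination (the
+   tied operand becomes %1, so those templates name only %0, %2 and %3).
+   No clobbers are needed since only the output register is written.  */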
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vaba_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
+{
+  int8x8_t result;
+  __asm__ ("saba %0.8b,%2.8b,%3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vaba_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
+{
+  int16x4_t result;
+  __asm__ ("saba %0.4h,%2.4h,%3.4h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vaba_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
+{
+  int32x2_t result;
+  __asm__ ("saba %0.2s,%2.2s,%3.2s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vaba_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
+{
+  uint8x8_t result;
+  __asm__ ("uaba %0.8b,%2.8b,%3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vaba_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
+{
+  uint16x4_t result;
+  __asm__ ("uaba %0.4h,%2.4h,%3.4h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vaba_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
+{
+  uint32x2_t result;
+  __asm__ ("uaba %0.2s,%2.2s,%3.2s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
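+/* The "_high" variants map to the "2"-suffixed instructions (sabal2,
+   uabal2), which read the upper halves of their 128-bit sources.  */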
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabal_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
+{
+  int16x8_t result;
+  __asm__ ("sabal2 %0.8h,%2.16b,%3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
+{
+  int32x4_t result;
+  __asm__ ("sabal2 %0.4s,%2.8h,%3.8h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
+{
+  int64x2_t result;
+  __asm__ ("sabal2 %0.2d,%2.4s,%3.4s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabal_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
+{
+  uint16x8_t result;
+  __asm__ ("uabal2 %0.8h,%2.16b,%3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabal_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
+{
+  uint32x4_t result;
+  __asm__ ("uabal2 %0.4s,%2.8h,%3.8h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vabal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
+{
+  uint64x2_t result;
+  __asm__ ("uabal2 %0.2d,%2.4s,%3.4s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabal_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
+{
+  int16x8_t result;
+  __asm__ ("sabal %0.8h,%2.8b,%3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabal_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
+{
+  int32x4_t result;
+  __asm__ ("sabal %0.4s,%2.4h,%3.4h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabal_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
+{
+  int64x2_t result;
+  __asm__ ("sabal %0.2d,%2.2s,%3.2s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabal_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
+{
+  uint16x8_t result;
+  __asm__ ("uabal %0.8h,%2.8b,%3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabal_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
+{
+  uint32x4_t result;
+  __asm__ ("uabal %0.4s,%2.4h,%3.4h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vabal_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
+{
+  uint64x2_t result;
+  __asm__ ("uabal %0.2d,%2.2s,%3.2s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vabaq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
+{
+  int8x16_t result;
+  __asm__ ("saba %0.16b,%2.16b,%3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabaq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
+{
+  int16x8_t result;
+  __asm__ ("saba %0.8h,%2.8h,%3.8h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabaq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
+{
+  int32x4_t result;
+  __asm__ ("saba %0.4s,%2.4s,%3.4s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vabaq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
+{
+  uint8x16_t result;
+  __asm__ ("uaba %0.16b,%2.16b,%3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabaq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
+{
+  uint16x8_t result;
+  __asm__ ("uaba %0.8h,%2.8h,%3.8h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
+{
+  uint32x4_t result;
+  __asm__ ("uaba %0.4s,%2.4s,%3.4s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vabd_f32 (float32x2_t a, float32x2_t b)
+{
+  float32x2_t result;
+  __asm__ ("fabd %0.2s, %1.2s, %2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vabd_s8 (int8x8_t a, int8x8_t b)
+{
+  int8x8_t result;
+  __asm__ ("sabd %0.8b, %1.8b, %2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vabd_s16 (int16x4_t a, int16x4_t b)
+{
+  int16x4_t result;
+  __asm__ ("sabd %0.4h, %1.4h, %2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vabd_s32 (int32x2_t a, int32x2_t b)
+{
+  int32x2_t result;
+  __asm__ ("sabd %0.2s, %1.2s, %2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vabd_u8 (uint8x8_t a, uint8x8_t b)
+{
+  uint8x8_t result;
+  __asm__ ("uabd %0.8b, %1.8b, %2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vabd_u16 (uint16x4_t a, uint16x4_t b)
+{
+  uint16x4_t result;
+  __asm__ ("uabd %0.4h, %1.4h, %2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vabd_u32 (uint32x2_t a, uint32x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("uabd %0.2s, %1.2s, %2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vabdd_f64 (float64_t a, float64_t b)
+{
+  float64_t result;
+  __asm__ ("fabd %d0, %d1, %d2"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabdl_high_s8 (int8x16_t a, int8x16_t b)
+{
+  int16x8_t result;
+  __asm__ ("sabdl2 %0.8h,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabdl_high_s16 (int16x8_t a, int16x8_t b)
+{
+  int32x4_t result;
+  __asm__ ("sabdl2 %0.4s,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabdl_high_s32 (int32x4_t a, int32x4_t b)
+{
+  int64x2_t result;
+  __asm__ ("sabdl2 %0.2d,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabdl_high_u8 (uint8x16_t a, uint8x16_t b)
+{
+  uint16x8_t result;
+  __asm__ ("uabdl2 %0.8h,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabdl_high_u16 (uint16x8_t a, uint16x8_t b)
+{
+  uint32x4_t result;
+  __asm__ ("uabdl2 %0.4s,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vabdl_high_u32 (uint32x4_t a, uint32x4_t b)
+{
+  uint64x2_t result;
+  __asm__ ("uabdl2 %0.2d,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabdl_s8 (int8x8_t a, int8x8_t b)
+{
+  int16x8_t result;
+  __asm__ ("sabdl %0.8h, %1.8b, %2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabdl_s16 (int16x4_t a, int16x4_t b)
+{
+  int32x4_t result;
+  __asm__ ("sabdl %0.4s, %1.4h, %2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabdl_s32 (int32x2_t a, int32x2_t b)
+{
+  int64x2_t result;
+  __asm__ ("sabdl %0.2d, %1.2s, %2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabdl_u8 (uint8x8_t a, uint8x8_t b)
+{
+  uint16x8_t result;
+  __asm__ ("uabdl %0.8h, %1.8b, %2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabdl_u16 (uint16x4_t a, uint16x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("uabdl %0.4s, %1.4h, %2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vabdl_u32 (uint32x2_t a, uint32x2_t b)
+{
+  uint64x2_t result;
+  __asm__ ("uabdl %0.2d, %1.2s, %2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vabdq_f32 (float32x4_t a, float32x4_t b)
+{
+  float32x4_t result;
+  __asm__ ("fabd %0.4s, %1.4s, %2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vabdq_f64 (float64x2_t a, float64x2_t b)
+{
+  float64x2_t result;
+  __asm__ ("fabd %0.2d, %1.2d, %2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vabdq_s8 (int8x16_t a, int8x16_t b)
+{
+  int8x16_t result;
+  __asm__ ("sabd %0.16b, %1.16b, %2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabdq_s16 (int16x8_t a, int16x8_t b)
+{
+  int16x8_t result;
+  __asm__ ("sabd %0.8h, %1.8h, %2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabdq_s32 (int32x4_t a, int32x4_t b)
+{
+  int32x4_t result;
+  __asm__ ("sabd %0.4s, %1.4s, %2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vabdq_u8 (uint8x16_t a, uint8x16_t b)
+{
+  uint8x16_t result;
+  __asm__ ("uabd %0.16b, %1.16b, %2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabdq_u16 (uint16x8_t a, uint16x8_t b)
+{
+  uint16x8_t result;
+  __asm__ ("uabd %0.8h, %1.8h, %2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabdq_u32 (uint32x4_t a, uint32x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("uabd %0.4s, %1.4s, %2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vabds_f32 (float32_t a, float32_t b)
+{
+  float32_t result;
+  __asm__ ("fabd %s0, %s1, %s2"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vabs_f32 (float32x2_t a)
+{
+  float32x2_t result;
+  __asm__ ("fabs %0.2s,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vabs_s8 (int8x8_t a)
+{
+  int8x8_t result;
+  __asm__ ("abs %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vabs_s16 (int16x4_t a)
+{
+  int16x4_t result;
+  __asm__ ("abs %0.4h,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vabs_s32 (int32x2_t a)
+{
+  int32x2_t result;
+  __asm__ ("abs %0.2s,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vabsq_f32 (float32x4_t a)
+{
+  float32x4_t result;
+  __asm__ ("fabs %0.4s,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vabsq_f64 (float64x2_t a)
+{
+  float64x2_t result;
+  __asm__ ("fabs %0.2d,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vabsq_s8 (int8x16_t a)
+{
+  int8x16_t result;
+  __asm__ ("abs %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabsq_s16 (int16x8_t a)
+{
+  int16x8_t result;
+  __asm__ ("abs %0.8h,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabsq_s32 (int32x4_t a)
+{
+  int32x4_t result;
+  __asm__ ("abs %0.4s,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabsq_s64 (int64x2_t a)
+{
+  int64x2_t result;
+  __asm__ ("abs %0.2d,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vacged_f64 (float64_t a, float64_t b)
+{
+  float64_t result;
+  __asm__ ("facge %d0,%d1,%d2"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vacges_f32 (float32_t a, float32_t b)
+{
+  float32_t result;
+  __asm__ ("facge %s0,%s1,%s2"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vacgtd_f64 (float64_t a, float64_t b)
+{
+  float64_t result;
+  __asm__ ("facgt %d0,%d1,%d2"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vacgts_f32 (float32_t a, float32_t b)
+{
+  float32_t result;
+  __asm__ ("facgt %s0,%s1,%s2"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
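+/* The vaddlv and vaddv families map to the across-lanes reductions
+   saddlv/uaddlv and addv, which fold every lane of the source vector
+   into a single scalar (widened for the long forms).  */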
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vaddlv_s8 (int8x8_t a)
+{
+  int16_t result;
+  __asm__ ("saddlv %h0,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vaddlv_s16 (int16x4_t a)
+{
+  int32_t result;
+  __asm__ ("saddlv %s0,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vaddlv_u8 (uint8x8_t a)
+{
+  uint16_t result;
+  __asm__ ("uaddlv %h0,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vaddlv_u16 (uint16x4_t a)
+{
+  uint32_t result;
+  __asm__ ("uaddlv %s0,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vaddlvq_s8 (int8x16_t a)
+{
+  int16_t result;
+  __asm__ ("saddlv %h0,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vaddlvq_s16 (int16x8_t a)
+{
+  int32_t result;
+  __asm__ ("saddlv %s0,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vaddlvq_s32 (int32x4_t a)
+{
+  int64_t result;
+  __asm__ ("saddlv %d0,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vaddlvq_u8 (uint8x16_t a)
+{
+  uint16_t result;
+  __asm__ ("uaddlv %h0,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vaddlvq_u16 (uint16x8_t a)
+{
+  uint32_t result;
+  __asm__ ("uaddlv %s0,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vaddlvq_u32 (uint32x4_t a)
+{
+  uint64_t result;
+  __asm__ ("uaddlv %d0,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vaddv_s8 (int8x8_t a)
+{
+  int8_t result;
+  __asm__ ("addv %b0,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vaddv_s16 (int16x4_t a)
+{
+  int16_t result;
+  __asm__ ("addv %h0,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vaddv_u8 (uint8x8_t a)
+{
+  uint8_t result;
+  __asm__ ("addv %b0,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vaddv_u16 (uint16x4_t a)
+{
+  uint16_t result;
+  __asm__ ("addv %h0,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vaddvq_s8 (int8x16_t a)
+{
+  int8_t result;
+  __asm__ ("addv %b0,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vaddvq_s16 (int16x8_t a)
+{
+  int16_t result;
+  __asm__ ("addv %h0,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vaddvq_s32 (int32x4_t a)
+{
+  int32_t result;
+  __asm__ ("addv %s0,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vaddvq_u8 (uint8x16_t a)
+{
+  uint8_t result;
+  __asm__ ("addv %b0,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vaddvq_u16 (uint16x8_t a)
+{
+  uint16_t result;
+  __asm__ ("addv %h0,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vaddvq_u32 (uint32x4_t a)
+{
+  uint32_t result;
+  __asm__ ("addv %s0,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
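+/* bsl reads its destination as the bit-select mask: each result bit is
+   taken from the second operand where the mask bit is set and from the
+   third operand otherwise, hence the mask argument is tied to the
+   output with the "0" constraint.  */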
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vbsl_f32 (uint32x2_t a, float32x2_t b, float32x2_t c)
+{
+  float32x2_t result;
+  __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vbsl_p8 (uint8x8_t a, poly8x8_t b, poly8x8_t c)
+{
+  poly8x8_t result;
+  __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vbsl_p16 (uint16x4_t a, poly16x4_t b, poly16x4_t c)
+{
+  poly16x4_t result;
+  __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vbsl_s8 (uint8x8_t a, int8x8_t b, int8x8_t c)
+{
+  int8x8_t result;
+  __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vbsl_s16 (uint16x4_t a, int16x4_t b, int16x4_t c)
+{
+  int16x4_t result;
+  __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vbsl_s32 (uint32x2_t a, int32x2_t b, int32x2_t c)
+{
+  int32x2_t result;
+  __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vbsl_s64 (uint64x1_t a, int64x1_t b, int64x1_t c)
+{
+  int64x1_t result;
+  __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vbsl_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
+{
+  uint8x8_t result;
+  __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vbsl_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
+{
+  uint16x4_t result;
+  __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vbsl_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
+{
+  uint32x2_t result;
+  __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vbsl_u64 (uint64x1_t a, uint64x1_t b, uint64x1_t c)
+{
+  uint64x1_t result;
+  __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vbslq_f32 (uint32x4_t a, float32x4_t b, float32x4_t c)
+{
+  float32x4_t result;
+  __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vbslq_f64 (uint64x2_t a, float64x2_t b, float64x2_t c)
+{
+  float64x2_t result;
+  __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vbslq_p8 (uint8x16_t a, poly8x16_t b, poly8x16_t c)
+{
+  poly8x16_t result;
+  __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vbslq_p16 (uint16x8_t a, poly16x8_t b, poly16x8_t c)
+{
+  poly16x8_t result;
+  __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vbslq_s8 (uint8x16_t a, int8x16_t b, int8x16_t c)
+{
+  int8x16_t result;
+  __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vbslq_s16 (uint16x8_t a, int16x8_t b, int16x8_t c)
+{
+  int16x8_t result;
+  __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vbslq_s32 (uint32x4_t a, int32x4_t b, int32x4_t c)
+{
+  int32x4_t result;
+  __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vbslq_s64 (uint64x2_t a, int64x2_t b, int64x2_t c)
+{
+  int64x2_t result;
+  __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vbslq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
+{
+  uint8x16_t result;
+  __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vbslq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
+{
+  uint16x8_t result;
+  __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vbslq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
+{
+  uint32x4_t result;
+  __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vbslq_u64 (uint64x2_t a, uint64x2_t b, uint64x2_t c)
+{
+  uint64x2_t result;
+  __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcage_f32 (float32x2_t a, float32x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("facge %0.2s, %1.2s, %2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcageq_f32 (float32x4_t a, float32x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("facge %0.4s, %1.4s, %2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcageq_f64 (float64x2_t a, float64x2_t b)
+{
+  uint64x2_t result;
+  __asm__ ("facge %0.2d, %1.2d, %2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcagt_f32 (float32x2_t a, float32x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("facgt %0.2s, %1.2s, %2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcagtq_f32 (float32x4_t a, float32x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("facgt %0.4s, %1.4s, %2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcagtq_f64 (float64x2_t a, float64x2_t b)
+{
+  uint64x2_t result;
+  __asm__ ("facgt %0.2d, %1.2d, %2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
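+/* The absolute "less-than"/"less-or-equal" comparisons below are formed
+   by swapping the source operands of facgt and facge.  */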
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcale_f32 (float32x2_t a, float32x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("facge %0.2s, %2.2s, %1.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcaleq_f32 (float32x4_t a, float32x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("facge %0.4s, %2.4s, %1.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcaleq_f64 (float64x2_t a, float64x2_t b)
+{
+  uint64x2_t result;
+  __asm__ ("facge %0.2d, %2.2d, %1.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcalt_f32 (float32x2_t a, float32x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("facgt %0.2s, %2.2s, %1.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcaltq_f32 (float32x4_t a, float32x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("facgt %0.4s, %2.4s, %1.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcaltq_f64 (float64x2_t a, float64x2_t b)
+{
+  uint64x2_t result;
+  __asm__ ("facgt %0.2d, %2.2d, %1.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceq_f32 (float32x2_t a, float32x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("fcmeq %0.2s, %1.2s, %2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceq_f64 (float64x1_t a, float64x1_t b)
+{
+  uint64x1_t result;
+  __asm__ ("fcmeq %d0, %d1, %d2"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vceqd_f64 (float64_t a, float64_t b)
+{
+  float64_t result;
+  __asm__ ("fcmeq %d0,%d1,%d2"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqq_f32 (float32x4_t a, float32x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("fcmeq %0.4s, %1.4s, %2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vceqq_f64 (float64x2_t a, float64x2_t b)
+{
+  uint64x2_t result;
+  __asm__ ("fcmeq %0.2d, %1.2d, %2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vceqs_f32 (float32_t a, float32_t b)
+{
+  float32_t result;
+  __asm__ ("fcmeq %s0,%s1,%s2"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vceqzd_f64 (float64_t a)
+{
+  float64_t result;
+  __asm__ ("fcmeq %d0,%d1,#0"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vceqzs_f32 (float32_t a)
+{
+  float32_t result;
+  __asm__ ("fcmeq %s0,%s1,#0"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcge_f32 (float32x2_t a, float32x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("fcmge %0.2s, %1.2s, %2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcge_f64 (float64x1_t a, float64x1_t b)
+{
+  uint64x1_t result;
+  __asm__ ("fcmge %d0, %d1, %d2"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgeq_f32 (float32x4_t a, float32x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("fcmge %0.4s, %1.4s, %2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgeq_f64 (float64x2_t a, float64x2_t b)
+{
+  uint64x2_t result;
+  __asm__ ("fcmge %0.2d, %1.2d, %2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgt_f32 (float32x2_t a, float32x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("fcmgt %0.2s, %1.2s, %2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgt_f64 (float64x1_t a, float64x1_t b)
+{
+  uint64x1_t result;
+  __asm__ ("fcmgt %d0, %d1, %d2"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtq_f32 (float32x4_t a, float32x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("fcmgt %0.4s, %1.4s, %2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgtq_f64 (float64x2_t a, float64x2_t b)
+{
+  uint64x2_t result;
+  __asm__ ("fcmgt %0.2d, %1.2d, %2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
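+/* vcle and vclt are likewise formed by swapping the source operands of
+   fcmge and fcmgt.  */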
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcle_f32 (float32x2_t a, float32x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("fcmge %0.2s, %2.2s, %1.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcle_f64 (float64x1_t a, float64x1_t b)
+{
+  uint64x1_t result;
+  __asm__ ("fcmge %d0, %d2, %d1"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcleq_f32 (float32x4_t a, float32x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("fcmge %0.4s, %2.4s, %1.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcleq_f64 (float64x2_t a, float64x2_t b)
+{
+  uint64x2_t result;
+  __asm__ ("fcmge %0.2d, %2.2d, %1.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
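+/* cls counts the leading bits that match the sign bit (the sign bit
+   itself is not counted).  */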
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vcls_s8 (int8x8_t a)
+{
+  int8x8_t result;
+  __asm__ ("cls %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vcls_s16 (int16x4_t a)
+{
+  int16x4_t result;
+  __asm__ ("cls %0.4h,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcls_s32 (int32x2_t a)
+{
+  int32x2_t result;
+  __asm__ ("cls %0.2s,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vclsq_s8 (int8x16_t a)
+{
+  int8x16_t result;
+  __asm__ ("cls %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vclsq_s16 (int16x8_t a)
+{
+  int16x8_t result;
+  __asm__ ("cls %0.8h,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vclsq_s32 (int32x4_t a)
+{
+  int32x4_t result;
+  __asm__ ("cls %0.4s,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclt_f32 (float32x2_t a, float32x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("fcmgt %0.2s, %2.2s, %1.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vclt_f64 (float64x1_t a, float64x1_t b)
+{
+  uint64x1_t result;
+  __asm__ ("fcmgt %d0, %d2, %d1"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcltq_f32 (float32x4_t a, float32x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("fcmgt %0.4s, %2.4s, %1.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcltq_f64 (float64x2_t a, float64x2_t b)
+{
+  uint64x2_t result;
+  __asm__ ("fcmgt %0.2d, %2.2d, %1.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vclz_s8 (int8x8_t a)
+{
+  int8x8_t result;
+  __asm__ ("clz %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vclz_s16 (int16x4_t a)
+{
+  int16x4_t result;
+  __asm__ ("clz %0.4h,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vclz_s32 (int32x2_t a)
+{
+  int32x2_t result;
+  __asm__ ("clz %0.2s,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclz_u8 (uint8x8_t a)
+{
+  uint8x8_t result;
+  __asm__ ("clz %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vclz_u16 (uint16x4_t a)
+{
+  uint16x4_t result;
+  __asm__ ("clz %0.4h,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclz_u32 (uint32x2_t a)
+{
+  uint32x2_t result;
+  __asm__ ("clz %0.2s,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vclzq_s8 (int8x16_t a)
+{
+  int8x16_t result;
+  __asm__ ("clz %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vclzq_s16 (int16x8_t a)
+{
+  int16x8_t result;
+  __asm__ ("clz %0.8h,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vclzq_s32 (int32x4_t a)
+{
+  int32x4_t result;
+  __asm__ ("clz %0.4s,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vclzq_u8 (uint8x16_t a)
+{
+  uint8x16_t result;
+  __asm__ ("clz %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vclzq_u16 (uint16x8_t a)
+{
+  uint16x8_t result;
+  __asm__ ("clz %0.8h,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vclzq_u32 (uint32x4_t a)
+{
+  uint32x4_t result;
+  __asm__ ("clz %0.4s,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
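+/* cnt computes the population count of each byte.  */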
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vcnt_p8 (poly8x8_t a)
+{
+  poly8x8_t result;
+  __asm__ ("cnt %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vcnt_s8 (int8x8_t a)
+{
+  int8x8_t result;
+  __asm__ ("cnt %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcnt_u8 (uint8x8_t a)
+{
+  uint8x8_t result;
+  __asm__ ("cnt %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vcntq_p8 (poly8x16_t a)
+{
+  poly8x16_t result;
+  __asm__ ("cnt %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vcntq_s8 (int8x16_t a)
+{
+  int8x16_t result;
+  __asm__ ("cnt %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcntq_u8 (uint8x16_t a)
+{
+  uint8x16_t result;
+  __asm__ ("cnt %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
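+/* The lane numbers feed the "i" (immediate) constraints of the ins
+   instruction and must therefore be integer constant expressions, so
+   these variants are macros built from GNU statement expressions rather
+   than inline functions.  */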
+#define vcopyq_lane_f32(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x4_t c_ = (c);                                            \
+       float32x4_t a_ = (a);                                            \
+       float32x4_t result;                                              \
+       __asm__ ("ins %0.s[%2], %3.s[%4]"                                \
+                : "=w"(result)                                          \
+                : "0"(a_), "i"(b), "w"(c_), "i"(d)                      \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcopyq_lane_f64(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x2_t c_ = (c);                                            \
+       float64x2_t a_ = (a);                                            \
+       float64x2_t result;                                              \
+       __asm__ ("ins %0.d[%2], %3.d[%4]"                                \
+                : "=w"(result)                                          \
+                : "0"(a_), "i"(b), "w"(c_), "i"(d)                      \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcopyq_lane_p8(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       poly8x16_t c_ = (c);                                             \
+       poly8x16_t a_ = (a);                                             \
+       poly8x16_t result;                                               \
+       __asm__ ("ins %0.b[%2], %3.b[%4]"                                \
+                : "=w"(result)                                          \
+                : "0"(a_), "i"(b), "w"(c_), "i"(d)                      \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcopyq_lane_p16(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       poly16x8_t c_ = (c);                                             \
+       poly16x8_t a_ = (a);                                             \
+       poly16x8_t result;                                               \
+       __asm__ ("ins %0.h[%2], %3.h[%4]"                                \
+                : "=w"(result)                                          \
+                : "0"(a_), "i"(b), "w"(c_), "i"(d)                      \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcopyq_lane_s8(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       int8x16_t c_ = (c);                                              \
+       int8x16_t a_ = (a);                                              \
+       int8x16_t result;                                                \
+       __asm__ ("ins %0.b[%2], %3.b[%4]"                                \
+                : "=w"(result)                                          \
+                : "0"(a_), "i"(b), "w"(c_), "i"(d)                      \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcopyq_lane_s16(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t c_ = (c);                                              \
+       int16x8_t a_ = (a);                                              \
+       int16x8_t result;                                                \
+       __asm__ ("ins %0.h[%2], %3.h[%4]"                                \
+                : "=w"(result)                                          \
+                : "0"(a_), "i"(b), "w"(c_), "i"(d)                      \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcopyq_lane_s32(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t c_ = (c);                                              \
+       int32x4_t a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("ins %0.s[%2], %3.s[%4]"                                \
+                : "=w"(result)                                          \
+                : "0"(a_), "i"(b), "w"(c_), "i"(d)                      \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcopyq_lane_s64(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       int64x2_t c_ = (c);                                              \
+       int64x2_t a_ = (a);                                              \
+       int64x2_t result;                                                \
+       __asm__ ("ins %0.d[%2], %3.d[%4]"                                \
+                : "=w"(result)                                          \
+                : "0"(a_), "i"(b), "w"(c_), "i"(d)                      \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcopyq_lane_u8(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       uint8x16_t c_ = (c);                                             \
+       uint8x16_t a_ = (a);                                             \
+       uint8x16_t result;                                               \
+       __asm__ ("ins %0.b[%2], %3.b[%4]"                                \
+                : "=w"(result)                                          \
+                : "0"(a_), "i"(b), "w"(c_), "i"(d)                      \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcopyq_lane_u16(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t c_ = (c);                                             \
+       uint16x8_t a_ = (a);                                             \
+       uint16x8_t result;                                               \
+       __asm__ ("ins %0.h[%2], %3.h[%4]"                                \
+                : "=w"(result)                                          \
+                : "0"(a_), "i"(b), "w"(c_), "i"(d)                      \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcopyq_lane_u32(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t c_ = (c);                                             \
+       uint32x4_t a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("ins %0.s[%2], %3.s[%4]"                                \
+                : "=w"(result)                                          \
+                : "0"(a_), "i"(b), "w"(c_), "i"(d)                      \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcopyq_lane_u64(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       uint64x2_t c_ = (c);                                             \
+       uint64x2_t a_ = (a);                                             \
+       uint64x2_t result;                                               \
+       __asm__ ("ins %0.d[%2], %3.d[%4]"                                \
+                : "=w"(result)                                          \
+                : "0"(a_), "i"(b), "w"(c_), "i"(d)                      \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+/* vcvt_f16_f32 not supported */
+
+/* vcvt_f32_f16 not supported */
+
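+/* Conversions between float64x2_t and float32x2_t/float32x4_t map onto
+   FCVTN/FCVTN2 (narrow) and FCVTL/FCVTL2 (widen).  Illustrative use
+   (variable names arbitrary):
+
+     float32x2_t lo = vcvt_f32_f64 (d0);           narrow d0 to 2 floats
+     float32x4_t s = vcvt_high_f32_f64 (lo, d1);   narrow d1 into high half  */
+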
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_f32_f64 (float64x2_t a)
+{
+  float32x2_t result;
+  __asm__ ("fcvtn %0.2s,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_f32_s32 (int32x2_t a)
+{
+  float32x2_t result;
+  __asm__ ("scvtf %0.2s, %1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_f32_u32 (uint32x2_t a)
+{
+  float32x2_t result;
+  __asm__ ("ucvtf %0.2s, %1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vcvt_f64_f32 (float32x2_t a)
+{
+  float64x2_t result;
+  __asm__ ("fcvtl %0.2d,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vcvt_f64_s64 (int64x1_t a)
+{
+  float64x1_t result;
+  __asm__ ("scvtf %d0, %d1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vcvt_f64_u64 (uint64x1_t a)
+{
+  float64x1_t result;
+  __asm__ ("ucvtf %d0, %d1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+/* vcvt_high_f16_f32 not supported */
+
+/* vcvt_high_f32_f16 not supported */
+
+static float32x2_t vdup_n_f32 (float32_t);
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvt_high_f32_f64 (float32x2_t a, float64x2_t b)
+{
+  float32x4_t result = vcombine_f32 (a, vdup_n_f32 (0.0f));
+  __asm__ ("fcvtn2 %0.4s,%2.2d"
+           : "+w"(result)
+           : "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vcvt_high_f64_f32 (float32x4_t a)
+{
+  float64x2_t result;
+  __asm__ ("fcvtl2 %0.2d,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
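+/* The vcvt*_n_* macros convert between fixed-point and floating-point
+   values; the second argument gives the number of fractional bits and
+   must be an integer constant expression (1..32 for 32-bit elements,
+   1..64 for 64-bit elements).  Illustrative use:
+
+     float32x2_t f = vcvt_n_f32_s32 (q, 16);   treat q as Q15.16 fixed point  */
+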
+#define vcvt_n_f32_s32(a, b)                                            \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x2_t a_ = (a);                                              \
+       float32x2_t result;                                              \
+       __asm__ ("scvtf %0.2s, %1.2s, #%2"                               \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcvt_n_f32_u32(a, b)                                            \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x2_t a_ = (a);                                             \
+       float32x2_t result;                                              \
+       __asm__ ("ucvtf %0.2s, %1.2s, #%2"                               \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcvt_n_s32_f32(a, b)                                            \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x2_t a_ = (a);                                            \
+       int32x2_t result;                                                \
+       __asm__ ("fcvtzs %0.2s, %1.2s, #%2"                              \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcvt_n_u32_f32(a, b)                                            \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x2_t a_ = (a);                                            \
+       uint32x2_t result;                                               \
+       __asm__ ("fcvtzu %0.2s, %1.2s, #%2"                              \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvt_s32_f32 (float32x2_t a)
+{
+  int32x2_t result;
+  __asm__ ("fcvtzs %0.2s, %1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvt_u32_f32 (float32x2_t a)
+{
+  uint32x2_t result;
+  __asm__ ("fcvtzu %0.2s, %1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
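+/* The vcvt{a,m,n,p}* intrinsics convert floating point to integer with an
+   explicit rounding mode: 'a' rounds to nearest, ties away from zero
+   (FCVTAS/FCVTAU); 'm' rounds towards minus infinity (FCVTMS/FCVTMU);
+   'n' rounds to nearest, ties to even (FCVTNS/FCVTNU); 'p' rounds towards
+   plus infinity (FCVTPS/FCVTPU).  The plain vcvt* forms above round
+   towards zero (FCVTZS/FCVTZU).  Illustrative use:
+
+     int32x2_t i = vcvtm_s32_f32 (v);   floor of each lane of v  */
+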
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvta_s32_f32 (float32x2_t a)
+{
+  int32x2_t result;
+  __asm__ ("fcvtas %0.2s, %1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvta_u32_f32 (float32x2_t a)
+{
+  uint32x2_t result;
+  __asm__ ("fcvtau %0.2s, %1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvtad_s64_f64 (float64_t a)
+{
+  int64_t result;
+  __asm__ ("fcvtas %d0,%d1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvtad_u64_f64 (float64_t a)
+{
+  uint64_t result;
+  __asm__ ("fcvtau %d0,%d1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtaq_s32_f32 (float32x4_t a)
+{
+  int32x4_t result;
+  __asm__ ("fcvtas %0.4s, %1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcvtaq_s64_f64 (float64x2_t a)
+{
+  int64x2_t result;
+  __asm__ ("fcvtas %0.2d, %1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtaq_u32_f32 (float32x4_t a)
+{
+  uint32x4_t result;
+  __asm__ ("fcvtau %0.4s, %1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcvtaq_u64_f64 (float64x2_t a)
+{
+  uint64x2_t result;
+  __asm__ ("fcvtau %0.2d, %1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvtas_s32_f32 (float32_t a)
+{
+  int32_t result;
+  __asm__ ("fcvtas %s0,%s1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvtas_u32_f32 (float32_t a)
+{
+  uint32_t result;
+  __asm__ ("fcvtau %s0,%s1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vcvtd_f64_s64 (int64_t a)
+{
+  float64_t result;
+  __asm__ ("scvtf %d0,%d1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vcvtd_f64_u64 (uint64_t a)
+{
+  float64_t result;
+  __asm__ ("ucvtf %d0,%d1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+#define vcvtd_n_f64_s64(a, b)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       int64_t a_ = (a);                                                \
+       float64_t result;                                                \
+       __asm__ ("scvtf %d0,%d1,%2"                                      \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcvtd_n_f64_u64(a, b)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       uint64_t a_ = (a);                                               \
+       float64_t result;                                                \
+       __asm__ ("ucvtf %d0,%d1,%2"                                      \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcvtd_n_s64_f64(a, b)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       float64_t a_ = (a);                                              \
+       int64_t result;                                                  \
+       __asm__ ("fcvtzs %d0,%d1,%2"                                     \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcvtd_n_u64_f64(a, b)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       float64_t a_ = (a);                                              \
+       uint64_t result;                                                 \
+       __asm__ ("fcvtzu %d0,%d1,%2"                                     \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvtd_s64_f64 (float64_t a)
+{
+  int64_t result;
+  __asm__ ("fcvtzs %d0,%d1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvtd_u64_f64 (float64_t a)
+{
+  uint64_t result;
+  __asm__ ("fcvtzu %d0,%d1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvtm_s32_f32 (float32x2_t a)
+{
+  int32x2_t result;
+  __asm__ ("fcvtms %0.2s, %1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvtm_u32_f32 (float32x2_t a)
+{
+  uint32x2_t result;
+  __asm__ ("fcvtmu %0.2s, %1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvtmd_s64_f64 (float64_t a)
+{
+  int64_t result;
+  __asm__ ("fcvtms %d0,%d1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvtmd_u64_f64 (float64_t a)
+{
+  uint64_t result;
+  __asm__ ("fcvtmu %d0,%d1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtmq_s32_f32 (float32x4_t a)
+{
+  int32x4_t result;
+  __asm__ ("fcvtms %0.4s, %1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcvtmq_s64_f64 (float64x2_t a)
+{
+  int64x2_t result;
+  __asm__ ("fcvtms %0.2d, %1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtmq_u32_f32 (float32x4_t a)
+{
+  uint32x4_t result;
+  __asm__ ("fcvtmu %0.4s, %1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcvtmq_u64_f64 (float64x2_t a)
+{
+  uint64x2_t result;
+  __asm__ ("fcvtmu %0.2d, %1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvtms_s32_f32 (float32_t a)
+{
+  int32_t result;
+  __asm__ ("fcvtms %s0,%s1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvtms_u32_f32 (float32_t a)
+{
+  uint32_t result;
+  __asm__ ("fcvtmu %s0,%s1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvtn_s32_f32 (float32x2_t a)
+{
+  int32x2_t result;
+  __asm__ ("fcvtns %0.2s, %1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvtn_u32_f32 (float32x2_t a)
+{
+  uint32x2_t result;
+  __asm__ ("fcvtnu %0.2s, %1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvtnd_s64_f64 (float64_t a)
+{
+  int64_t result;
+  __asm__ ("fcvtns %d0,%d1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvtnd_u64_f64 (float64_t a)
+{
+  uint64_t result;
+  __asm__ ("fcvtnu %d0,%d1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtnq_s32_f32 (float32x4_t a)
+{
+  int32x4_t result;
+  __asm__ ("fcvtns %0.4s, %1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcvtnq_s64_f64 (float64x2_t a)
+{
+  int64x2_t result;
+  __asm__ ("fcvtns %0.2d, %1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtnq_u32_f32 (float32x4_t a)
+{
+  uint32x4_t result;
+  __asm__ ("fcvtnu %0.4s, %1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcvtnq_u64_f64 (float64x2_t a)
+{
+  uint64x2_t result;
+  __asm__ ("fcvtnu %0.2d, %1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvtns_s32_f32 (float32_t a)
+{
+  int32_t result;
+  __asm__ ("fcvtns %s0,%s1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvtns_u32_f32 (float32_t a)
+{
+  uint32_t result;
+  __asm__ ("fcvtnu %s0,%s1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvtp_s32_f32 (float32x2_t a)
+{
+  int32x2_t result;
+  __asm__ ("fcvtps %0.2s, %1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvtp_u32_f32 (float32x2_t a)
+{
+  uint32x2_t result;
+  __asm__ ("fcvtpu %0.2s, %1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvtpd_s64_f64 (float64_t a)
+{
+  int64_t result;
+  __asm__ ("fcvtps %d0,%d1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvtpd_u64_f64 (float64_t a)
+{
+  uint64_t result;
+  __asm__ ("fcvtpu %d0,%d1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtpq_s32_f32 (float32x4_t a)
+{
+  int32x4_t result;
+  __asm__ ("fcvtps %0.4s, %1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcvtpq_s64_f64 (float64x2_t a)
+{
+  int64x2_t result;
+  __asm__ ("fcvtps %0.2d, %1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtpq_u32_f32 (float32x4_t a)
+{
+  uint32x4_t result;
+  __asm__ ("fcvtpu %0.4s, %1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcvtpq_u64_f64 (float64x2_t a)
+{
+  uint64x2_t result;
+  __asm__ ("fcvtpu %0.2d, %1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvtps_s32_f32 (float32_t a)
+{
+  int32_t result;
+  __asm__ ("fcvtps %s0,%s1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvtps_u32_f32 (float32_t a)
+{
+  uint32_t result;
+  __asm__ ("fcvtpu %s0,%s1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtq_f32_s32 (int32x4_t a)
+{
+  float32x4_t result;
+  __asm__ ("scvtf %0.4s, %1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtq_f32_u32 (uint32x4_t a)
+{
+  float32x4_t result;
+  __asm__ ("ucvtf %0.4s, %1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vcvtq_f64_s64 (int64x2_t a)
+{
+  float64x2_t result;
+  __asm__ ("scvtf %0.2d, %1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vcvtq_f64_u64 (uint64x2_t a)
+{
+  float64x2_t result;
+  __asm__ ("ucvtf %0.2d, %1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+#define vcvtq_n_f32_s32(a, b)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t a_ = (a);                                              \
+       float32x4_t result;                                              \
+       __asm__ ("scvtf %0.4s, %1.4s, #%2"                               \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcvtq_n_f32_u32(a, b)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t a_ = (a);                                             \
+       float32x4_t result;                                              \
+       __asm__ ("ucvtf %0.4s, %1.4s, #%2"                               \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcvtq_n_f64_s64(a, b)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       int64x2_t a_ = (a);                                              \
+       float64x2_t result;                                              \
+       __asm__ ("scvtf %0.2d, %1.2d, #%2"                               \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcvtq_n_f64_u64(a, b)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       uint64x2_t a_ = (a);                                             \
+       float64x2_t result;                                              \
+       __asm__ ("ucvtf %0.2d, %1.2d, #%2"                               \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcvtq_n_s32_f32(a, b)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x4_t a_ = (a);                                            \
+       int32x4_t result;                                                \
+       __asm__ ("fcvtzs %0.4s, %1.4s, #%2"                              \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcvtq_n_s64_f64(a, b)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x2_t a_ = (a);                                            \
+       int64x2_t result;                                                \
+       __asm__ ("fcvtzs %0.2d, %1.2d, #%2"                              \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcvtq_n_u32_f32(a, b)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x4_t a_ = (a);                                            \
+       uint32x4_t result;                                               \
+       __asm__ ("fcvtzu %0.4s, %1.4s, #%2"                              \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcvtq_n_u64_f64(a, b)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x2_t a_ = (a);                                            \
+       uint64x2_t result;                                               \
+       __asm__ ("fcvtzu %0.2d, %1.2d, #%2"                              \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtq_s32_f32 (float32x4_t a)
+{
+  int32x4_t result;
+  __asm__ ("fcvtzs %0.4s, %1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcvtq_s64_f64 (float64x2_t a)
+{
+  int64x2_t result;
+  __asm__ ("fcvtzs %0.2d, %1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtq_u32_f32 (float32x4_t a)
+{
+  uint32x4_t result;
+  __asm__ ("fcvtzu %0.4s, %1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcvtq_u64_f64 (float64x2_t a)
+{
+  uint64x2_t result;
+  __asm__ ("fcvtzu %0.2d, %1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vcvts_f32_s32 (int32_t a)
+{
+  float32_t result;
+  __asm__ ("scvtf %s0,%s1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vcvts_f32_u32 (uint32_t a)
+{
+  float32_t result;
+  __asm__ ("ucvtf %s0,%s1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+#define vcvts_n_f32_s32(a, b)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       int32_t a_ = (a);                                                \
+       float32_t result;                                                \
+       __asm__ ("scvtf %s0,%s1,%2"                                      \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcvts_n_f32_u32(a, b)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32_t a_ = (a);                                               \
+       float32_t result;                                                \
+       __asm__ ("ucvtf %s0,%s1,%2"                                      \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcvts_n_s32_f32(a, b)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       float32_t a_ = (a);                                              \
+       int32_t result;                                                  \
+       __asm__ ("fcvtzs %s0,%s1,%2"                                     \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vcvts_n_u32_f32(a, b)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       float32_t a_ = (a);                                              \
+       uint32_t result;                                                 \
+       __asm__ ("fcvtzu %s0,%s1,%2"                                     \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvts_s32_f32 (float32_t a)
+{
+  int32_t result;
+  __asm__ ("fcvtzs %s0,%s1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvts_u32_f32 (float32_t a)
+{
+  uint32_t result;
+  __asm__ ("fcvtzu %s0,%s1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
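+/* The vcvtx* intrinsics narrow double to single precision using the
+   FCVTXN "round to odd" mode, which avoids double rounding when the
+   result is rounded again afterwards (e.g. on to half precision).  */
+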
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvtx_f32_f64 (float64x2_t a)
+{
+  float32x2_t result;
+  __asm__ ("fcvtxn %0.2s,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtx_high_f32_f64 (float32x2_t a, float64x2_t b)
+{
+  float32x4_t result = vcombine_f32 (a, vdup_n_f32 (0.0f));
+  __asm__ ("fcvtxn2 %0.4s,%2.2d"
+           : "+w"(result)
+           : "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vcvtxd_f32_f64 (float64_t a)
+{
+  float32_t result;
+  __asm__ ("fcvtxn %s0,%d1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
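+/* vdup_lane_<type> (a, b) broadcasts lane B of vector A to every lane of
+   the result (DUP with a lane index); vdup_n_<type> (x) broadcasts a
+   scalar from a general-purpose register.  The lane index must be an
+   integer constant expression.  Illustrative use (names arbitrary):
+
+     int16x4_t r = vdup_lane_s16 (v, 2);   every lane of r becomes v[2]
+     int16x4_t s = vdup_n_s16 (42);        every lane of s becomes 42  */
+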
+#define vdup_lane_f32(a, b)                                             \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x2_t a_ = (a);                                            \
+       float32x2_t result;                                              \
+       __asm__ ("dup %0.2s,%1.s[%2]"                                    \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdup_lane_p8(a, b)                                              \
+  __extension__                                                         \
+    ({                                                                  \
+       poly8x8_t a_ = (a);                                              \
+       poly8x8_t result;                                                \
+       __asm__ ("dup %0.8b,%1.b[%2]"                                    \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdup_lane_p16(a, b)                                             \
+  __extension__                                                         \
+    ({                                                                  \
+       poly16x4_t a_ = (a);                                             \
+       poly16x4_t result;                                               \
+       __asm__ ("dup %0.4h,%1.h[%2]"                                    \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdup_lane_s8(a, b)                                              \
+  __extension__                                                         \
+    ({                                                                  \
+       int8x8_t a_ = (a);                                               \
+       int8x8_t result;                                                 \
+       __asm__ ("dup %0.8b,%1.b[%2]"                                    \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdup_lane_s16(a, b)                                             \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x4_t a_ = (a);                                              \
+       int16x4_t result;                                                \
+       __asm__ ("dup %0.4h,%1.h[%2]"                                    \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdup_lane_s32(a, b)                                             \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x2_t a_ = (a);                                              \
+       int32x2_t result;                                                \
+       __asm__ ("dup %0.2s,%1.s[%2]"                                    \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdup_lane_s64(a, b)                                             \
+  __extension__                                                         \
+    ({                                                                  \
+       int64x1_t a_ = (a);                                              \
+       int64x1_t result;                                                \
+       __asm__ ("ins %0.d[0],%1.d[%2]"                                  \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdup_lane_u8(a, b)                                              \
+  __extension__                                                         \
+    ({                                                                  \
+       uint8x8_t a_ = (a);                                              \
+       uint8x8_t result;                                                \
+       __asm__ ("dup %0.8b,%1.b[%2]"                                    \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdup_lane_u16(a, b)                                             \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x4_t a_ = (a);                                             \
+       uint16x4_t result;                                               \
+       __asm__ ("dup %0.4h,%1.h[%2]"                                    \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdup_lane_u32(a, b)                                             \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x2_t a_ = (a);                                             \
+       uint32x2_t result;                                               \
+       __asm__ ("dup %0.2s,%1.s[%2]"                                    \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdup_lane_u64(a, b)                                             \
+  __extension__                                                         \
+    ({                                                                  \
+       uint64x1_t a_ = (a);                                             \
+       uint64x1_t result;                                               \
+       __asm__ ("ins %0.d[0],%1.d[%2]"                                  \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vdup_n_f32 (float32_t a)
+{
+  float32x2_t result;
+  __asm__ ("dup %0.2s, %w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vdup_n_p8 (uint32_t a)
+{
+  poly8x8_t result;
+  __asm__ ("dup %0.8b,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vdup_n_p16 (uint32_t a)
+{
+  poly16x4_t result;
+  __asm__ ("dup %0.4h,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vdup_n_s8 (int32_t a)
+{
+  int8x8_t result;
+  __asm__ ("dup %0.8b,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vdup_n_s16 (int32_t a)
+{
+  int16x4_t result;
+  __asm__ ("dup %0.4h,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vdup_n_s32 (int32_t a)
+{
+  int32x2_t result;
+  __asm__ ("dup %0.2s,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vdup_n_s64 (int64_t a)
+{
+  int64x1_t result;
+  __asm__ ("ins %0.d[0],%x1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vdup_n_u8 (uint32_t a)
+{
+  uint8x8_t result;
+  __asm__ ("dup %0.8b,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vdup_n_u16 (uint32_t a)
+{
+  uint16x4_t result;
+  __asm__ ("dup %0.4h,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vdup_n_u32 (uint32_t a)
+{
+  uint32x2_t result;
+  __asm__ ("dup %0.2s,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vdup_n_u64 (uint64_t a)
+{
+  uint64x1_t result;
+  __asm__ ("ins %0.d[0],%x1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+#define vdupd_lane_f64(a, b)                                            \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x2_t a_ = (a);                                            \
+       float64_t result;                                                \
+       __asm__ ("dup %d0, %1.d[%2]"                                     \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdupq_lane_f32(a, b)                                            \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x2_t a_ = (a);                                            \
+       float32x4_t result;                                              \
+       __asm__ ("dup %0.4s,%1.s[%2]"                                    \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdupq_lane_f64(a, b)                                            \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x1_t a_ = (a);                                            \
+       float64x2_t result;                                              \
+       __asm__ ("dup %0.2d,%1.d[%2]"                                    \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdupq_lane_p8(a, b)                                             \
+  __extension__                                                         \
+    ({                                                                  \
+       poly8x8_t a_ = (a);                                              \
+       poly8x16_t result;                                               \
+       __asm__ ("dup %0.16b,%1.b[%2]"                                   \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdupq_lane_p16(a, b)                                            \
+  __extension__                                                         \
+    ({                                                                  \
+       poly16x4_t a_ = (a);                                             \
+       poly16x8_t result;                                               \
+       __asm__ ("dup %0.8h,%1.h[%2]"                                    \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdupq_lane_s8(a, b)                                             \
+  __extension__                                                         \
+    ({                                                                  \
+       int8x8_t a_ = (a);                                               \
+       int8x16_t result;                                                \
+       __asm__ ("dup %0.16b,%1.b[%2]"                                   \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdupq_lane_s16(a, b)                                            \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x4_t a_ = (a);                                              \
+       int16x8_t result;                                                \
+       __asm__ ("dup %0.8h,%1.h[%2]"                                    \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdupq_lane_s32(a, b)                                            \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x2_t a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("dup %0.4s,%1.s[%2]"                                    \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdupq_lane_s64(a, b)                                            \
+  __extension__                                                         \
+    ({                                                                  \
+       int64x1_t a_ = (a);                                              \
+       int64x2_t result;                                                \
+       __asm__ ("dup %0.2d,%1.d[%2]"                                    \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdupq_lane_u8(a, b)                                             \
+  __extension__                                                         \
+    ({                                                                  \
+       uint8x8_t a_ = (a);                                              \
+       uint8x16_t result;                                               \
+       __asm__ ("dup %0.16b,%1.b[%2]"                                   \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdupq_lane_u16(a, b)                                            \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x4_t a_ = (a);                                             \
+       uint16x8_t result;                                               \
+       __asm__ ("dup %0.8h,%1.h[%2]"                                    \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdupq_lane_u32(a, b)                                            \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x2_t a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("dup %0.4s,%1.s[%2]"                                    \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vdupq_lane_u64(a, b)                                            \
+  __extension__                                                         \
+    ({                                                                  \
+       uint64x1_t a_ = (a);                                             \
+       uint64x2_t result;                                               \
+       __asm__ ("dup %0.2d,%1.d[%2]"                                    \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
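+/* vdupq_n_<type>: broadcast the scalar argument into every lane of a
+   128-bit vector.  Each variant emits a single DUP from a general
+   register into the destination SIMD register.  */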
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vdupq_n_f32 (float32_t a)
+{
+  float32x4_t result;
+  __asm__ ("dup %0.4s, %w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vdupq_n_f64 (float64_t a)
+{
+  float64x2_t result;
+  __asm__ ("dup %0.2d, %x1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vdupq_n_p8 (uint32_t a)
+{
+  poly8x16_t result;
+  __asm__ ("dup %0.16b,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vdupq_n_p16 (uint32_t a)
+{
+  poly16x8_t result;
+  __asm__ ("dup %0.8h,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vdupq_n_s8 (int32_t a)
+{
+  int8x16_t result;
+  __asm__ ("dup %0.16b,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vdupq_n_s16 (int32_t a)
+{
+  int16x8_t result;
+  __asm__ ("dup %0.8h,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vdupq_n_s32 (int32_t a)
+{
+  int32x4_t result;
+  __asm__ ("dup %0.4s,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vdupq_n_s64 (int64_t a)
+{
+  int64x2_t result;
+  __asm__ ("dup %0.2d,%x1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vdupq_n_u8 (uint32_t a)
+{
+  uint8x16_t result;
+  __asm__ ("dup %0.16b,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vdupq_n_u16 (uint32_t a)
+{
+  uint16x8_t result;
+  __asm__ ("dup %0.8h,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vdupq_n_u32 (uint32_t a)
+{
+  uint32x4_t result;
+  __asm__ ("dup %0.4s,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vdupq_n_u64 (uint64_t a)
+{
+  uint64x2_t result;
+  __asm__ ("dup %0.2d,%x1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
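+/* vdups_lane_f32: extract lane B of the 128-bit vector A as a scalar
+   float, using DUP into a scalar SIMD register.  */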
+#define vdups_lane_f32(a, b)                                            \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x4_t a_ = (a);                                            \
+       float32_t result;                                                \
+       __asm__ ("dup %s0, %1.s[%2]"                                     \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
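+/* vext_* and vextq_*: extract a vector from the concatenation A:B,
+   starting at element index C.  The EXT instruction takes a byte
+   offset, so the immediate is scaled by the element size in the asm
+   templates (e.g. "#%3*4" for 32-bit elements).  */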
+#define vext_f32(a, b, c)                                               \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x2_t b_ = (b);                                            \
+       float32x2_t a_ = (a);                                            \
+       float32x2_t result;                                              \
+       __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*4"                        \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vext_f64(a, b, c)                                               \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x1_t b_ = (b);                                            \
+       float64x1_t a_ = (a);                                            \
+       float64x1_t result;                                              \
+       __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*8"                        \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vext_p8(a, b, c)                                                \
+  __extension__                                                         \
+    ({                                                                  \
+       poly8x8_t b_ = (b);                                              \
+       poly8x8_t a_ = (a);                                              \
+       poly8x8_t result;                                                \
+       __asm__ ("ext %0.8b,%1.8b,%2.8b,%3"                              \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vext_p16(a, b, c)                                               \
+  __extension__                                                         \
+    ({                                                                  \
+       poly16x4_t b_ = (b);                                             \
+       poly16x4_t a_ = (a);                                             \
+       poly16x4_t result;                                               \
+       __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*2"                        \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vext_s8(a, b, c)                                                \
+  __extension__                                                         \
+    ({                                                                  \
+       int8x8_t b_ = (b);                                               \
+       int8x8_t a_ = (a);                                               \
+       int8x8_t result;                                                 \
+       __asm__ ("ext %0.8b,%1.8b,%2.8b,%3"                              \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vext_s16(a, b, c)                                               \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x4_t b_ = (b);                                              \
+       int16x4_t a_ = (a);                                              \
+       int16x4_t result;                                                \
+       __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*2"                        \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vext_s32(a, b, c)                                               \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x2_t b_ = (b);                                              \
+       int32x2_t a_ = (a);                                              \
+       int32x2_t result;                                                \
+       __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*4"                        \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vext_s64(a, b, c)                                               \
+  __extension__                                                         \
+    ({                                                                  \
+       int64x1_t b_ = (b);                                              \
+       int64x1_t a_ = (a);                                              \
+       int64x1_t result;                                                \
+       __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*8"                        \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vext_u8(a, b, c)                                                \
+  __extension__                                                         \
+    ({                                                                  \
+       uint8x8_t b_ = (b);                                              \
+       uint8x8_t a_ = (a);                                              \
+       uint8x8_t result;                                                \
+       __asm__ ("ext %0.8b,%1.8b,%2.8b,%3"                              \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vext_u16(a, b, c)                                               \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x4_t b_ = (b);                                             \
+       uint16x4_t a_ = (a);                                             \
+       uint16x4_t result;                                               \
+       __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*2"                        \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vext_u32(a, b, c)                                               \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x2_t b_ = (b);                                             \
+       uint32x2_t a_ = (a);                                             \
+       uint32x2_t result;                                               \
+       __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*4"                        \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vext_u64(a, b, c)                                               \
+  __extension__                                                         \
+    ({                                                                  \
+       uint64x1_t b_ = (b);                                             \
+       uint64x1_t a_ = (a);                                             \
+       uint64x1_t result;                                               \
+       __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*8"                        \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vextq_f32(a, b, c)                                              \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x4_t b_ = (b);                                            \
+       float32x4_t a_ = (a);                                            \
+       float32x4_t result;                                              \
+       __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*4"                     \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vextq_f64(a, b, c)                                              \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x2_t b_ = (b);                                            \
+       float64x2_t a_ = (a);                                            \
+       float64x2_t result;                                              \
+       __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*8"                     \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vextq_p8(a, b, c)                                               \
+  __extension__                                                         \
+    ({                                                                  \
+       poly8x16_t b_ = (b);                                             \
+       poly8x16_t a_ = (a);                                             \
+       poly8x16_t result;                                               \
+       __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3"                       \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vextq_p16(a, b, c)                                              \
+  __extension__                                                         \
+    ({                                                                  \
+       poly16x8_t b_ = (b);                                             \
+       poly16x8_t a_ = (a);                                             \
+       poly16x8_t result;                                               \
+       __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*2"                     \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vextq_s8(a, b, c)                                               \
+  __extension__                                                         \
+    ({                                                                  \
+       int8x16_t b_ = (b);                                              \
+       int8x16_t a_ = (a);                                              \
+       int8x16_t result;                                                \
+       __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3"                       \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vextq_s16(a, b, c)                                              \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t b_ = (b);                                              \
+       int16x8_t a_ = (a);                                              \
+       int16x8_t result;                                                \
+       __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*2"                     \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vextq_s32(a, b, c)                                              \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t b_ = (b);                                              \
+       int32x4_t a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*4"                     \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vextq_s64(a, b, c)                                              \
+  __extension__                                                         \
+    ({                                                                  \
+       int64x2_t b_ = (b);                                              \
+       int64x2_t a_ = (a);                                              \
+       int64x2_t result;                                                \
+       __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*8"                     \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vextq_u8(a, b, c)                                               \
+  __extension__                                                         \
+    ({                                                                  \
+       uint8x16_t b_ = (b);                                             \
+       uint8x16_t a_ = (a);                                             \
+       uint8x16_t result;                                               \
+       __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3"                       \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vextq_u16(a, b, c)                                              \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t b_ = (b);                                             \
+       uint16x8_t a_ = (a);                                             \
+       uint16x8_t result;                                               \
+       __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*2"                     \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vextq_u32(a, b, c)                                              \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t b_ = (b);                                             \
+       uint32x4_t a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*4"                     \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vextq_u64(a, b, c)                                              \
+  __extension__                                                         \
+    ({                                                                  \
+       uint64x2_t b_ = (b);                                             \
+       uint64x2_t a_ = (a);                                             \
+       uint64x2_t result;                                               \
+       __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*8"                     \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
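+/* vfma_* and vfms_*: fused multiply-accumulate, computing A + B * C
+   (FMLA) or A - B * C (FMLS).  The vector forms tie the accumulator
+   input to the output register with the "0" constraint so the
+   operation accumulates in place.  */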
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vfma_f32 (float32x2_t a, float32x2_t b, float32x2_t c)
+{
+  float32x2_t result;
+  __asm__ ("fmla %0.2s,%2.2s,%3.2s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+#define vfma_lane_f32(a, b, c, d)                                       \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x2_t c_ = (c);                                            \
+       float32x2_t b_ = (b);                                            \
+       float32x2_t a_ = (a);                                            \
+       float32x2_t result;                                              \
+       __asm__ ("fmla %0.2s,%2.2s,%3.s[%4]"                             \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vfmad_lane_f64(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x2_t b_ = (b);                                            \
+       float64_t a_ = (a);                                              \
+       float64_t result;                                                \
+       __asm__ ("fmla %d0,%d1,%2.d[%3]"                                 \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vfmaq_f32 (float32x4_t a, float32x4_t b, float32x4_t c)
+{
+  float32x4_t result;
+  __asm__ ("fmla %0.4s,%2.4s,%3.4s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vfmaq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
+{
+  float64x2_t result;
+  __asm__ ("fmla %0.2d,%2.2d,%3.2d"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+#define vfmaq_lane_f32(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x4_t c_ = (c);                                            \
+       float32x4_t b_ = (b);                                            \
+       float32x4_t a_ = (a);                                            \
+       float32x4_t result;                                              \
+       __asm__ ("fmla %0.4s,%2.4s,%3.s[%4]"                             \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vfmaq_lane_f64(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x2_t c_ = (c);                                            \
+       float64x2_t b_ = (b);                                            \
+       float64x2_t a_ = (a);                                            \
+       float64x2_t result;                                              \
+       __asm__ ("fmla %0.2d,%2.2d,%3.d[%4]"                             \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vfmas_lane_f32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x4_t b_ = (b);                                            \
+       float32_t a_ = (a);                                              \
+       float32_t result;                                                \
+       __asm__ ("fmla %s0,%s1,%2.s[%3]"                                 \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vfma_n_f32 (float32x2_t a, float32x2_t b, float32_t c)
+{
+  float32x2_t result;
+  __asm__ ("fmla %0.2s, %2.2s, %3.s[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vfmaq_n_f32 (float32x4_t a, float32x4_t b, float32_t c)
+{
+  float32x4_t result;
+  __asm__ ("fmla %0.4s, %2.4s, %3.s[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vfmaq_n_f64 (float64x2_t a, float64x2_t b, float64_t c)
+{
+  float64x2_t result;
+  __asm__ ("fmla %0.2d, %2.2d, %3.d[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vfms_f32 (float32x2_t a, float32x2_t b, float32x2_t c)
+{
+  float32x2_t result;
+  __asm__ ("fmls %0.2s,%2.2s,%3.2s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+#define vfmsd_lane_f64(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x2_t b_ = (b);                                            \
+       float64_t a_ = (a);                                              \
+       float64_t result;                                                \
+       __asm__ ("fmls %d0,%d1,%2.d[%3]"                                 \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vfmsq_f32 (float32x4_t a, float32x4_t b, float32x4_t c)
+{
+  float32x4_t result;
+  __asm__ ("fmls %0.4s,%2.4s,%3.4s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vfmsq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
+{
+  float64x2_t result;
+  __asm__ ("fmls %0.2d,%2.2d,%3.2d"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+#define vfmss_lane_f32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x4_t b_ = (b);                                            \
+       float32_t a_ = (a);                                              \
+       float32_t result;                                                \
+       __asm__ ("fmls %s0,%s1,%2.s[%3]"                                 \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
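+/* vget_high_<type>: return the upper 64-bit half of a 128-bit vector,
+   implemented by copying element d[1] of the source into d[0] of the
+   result with INS.  */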
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vget_high_f32 (float32x4_t a)
+{
+  float32x2_t result;
+  __asm__ ("ins %0.d[0], %1.d[1]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vget_high_f64 (float64x2_t a)
+{
+  float64x1_t result;
+  __asm__ ("ins %0.d[0], %1.d[1]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vget_high_p8 (poly8x16_t a)
+{
+  poly8x8_t result;
+  __asm__ ("ins %0.d[0], %1.d[1]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vget_high_p16 (poly16x8_t a)
+{
+  poly16x4_t result;
+  __asm__ ("ins %0.d[0], %1.d[1]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vget_high_s8 (int8x16_t a)
+{
+  int8x8_t result;
+  __asm__ ("ins %0.d[0], %1.d[1]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vget_high_s16 (int16x8_t a)
+{
+  int16x4_t result;
+  __asm__ ("ins %0.d[0], %1.d[1]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vget_high_s32 (int32x4_t a)
+{
+  int32x2_t result;
+  __asm__ ("ins %0.d[0], %1.d[1]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vget_high_s64 (int64x2_t a)
+{
+  int64x1_t result;
+  __asm__ ("ins %0.d[0], %1.d[1]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vget_high_u8 (uint8x16_t a)
+{
+  uint8x8_t result;
+  __asm__ ("ins %0.d[0], %1.d[1]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vget_high_u16 (uint16x8_t a)
+{
+  uint16x4_t result;
+  __asm__ ("ins %0.d[0], %1.d[1]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vget_high_u32 (uint32x4_t a)
+{
+  uint32x2_t result;
+  __asm__ ("ins %0.d[0], %1.d[1]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vget_high_u64 (uint64x2_t a)
+{
+  uint64x1_t result;
+  __asm__ ("ins %0.d[0], %1.d[1]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
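+/* vget_lane_f64: move the bits of lane B of the float64x1_t A to a
+   general register with UMOV and return them as a scalar double.  */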
+#define vget_lane_f64(a, b)                                             \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x1_t a_ = (a);                                            \
+       float64_t result;                                                \
+       __asm__ ("umov %x0, %1.d[%2]"                                    \
+                : "=r"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
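+/* vget_low_<type>: return the lower 64-bit half of a 128-bit vector,
+   implemented by copying element d[0] of the source into d[0] of the
+   result with INS.  */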
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vget_low_f32 (float32x4_t a)
+{
+  float32x2_t result;
+  __asm__ ("ins %0.d[0], %1.d[0]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vget_low_f64 (float64x2_t a)
+{
+  float64x1_t result;
+  __asm__ ("ins %0.d[0], %1.d[0]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vget_low_p8 (poly8x16_t a)
+{
+  poly8x8_t result;
+  __asm__ ("ins %0.d[0], %1.d[0]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vget_low_p16 (poly16x8_t a)
+{
+  poly16x4_t result;
+  __asm__ ("ins %0.d[0], %1.d[0]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vget_low_s8 (int8x16_t a)
+{
+  int8x8_t result;
+  __asm__ ("ins %0.d[0], %1.d[0]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vget_low_s16 (int16x8_t a)
+{
+  int16x4_t result;
+  __asm__ ("ins %0.d[0], %1.d[0]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vget_low_s32 (int32x4_t a)
+{
+  int32x2_t result;
+  __asm__ ("ins %0.d[0], %1.d[0]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vget_low_s64 (int64x2_t a)
+{
+  int64x1_t result;
+  __asm__ ("ins %0.d[0], %1.d[0]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vget_low_u8 (uint8x16_t a)
+{
+  uint8x8_t result;
+  __asm__ ("ins %0.d[0], %1.d[0]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vget_low_u16 (uint16x8_t a)
+{
+  uint16x4_t result;
+  __asm__ ("ins %0.d[0], %1.d[0]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vget_low_u32 (uint32x4_t a)
+{
+  uint32x2_t result;
+  __asm__ ("ins %0.d[0], %1.d[0]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vget_low_u64 (uint64x2_t a)
+{
+  uint64x1_t result;
+  __asm__ ("ins %0.d[0], %1.d[0]"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
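+/* vhsub_* and vhsubq_*: element-wise halving subtract, computing
+   (A - B) >> 1 for each lane without intermediate overflow; SHSUB for
+   the signed types, UHSUB for the unsigned types.  */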
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vhsub_s8 (int8x8_t a, int8x8_t b)
+{
+  int8x8_t result;
+  __asm__ ("shsub %0.8b, %1.8b, %2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vhsub_s16 (int16x4_t a, int16x4_t b)
+{
+  int16x4_t result;
+  __asm__ ("shsub %0.4h, %1.4h, %2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vhsub_s32 (int32x2_t a, int32x2_t b)
+{
+  int32x2_t result;
+  __asm__ ("shsub %0.2s, %1.2s, %2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vhsub_u8 (uint8x8_t a, uint8x8_t b)
+{
+  uint8x8_t result;
+  __asm__ ("uhsub %0.8b, %1.8b, %2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vhsub_u16 (uint16x4_t a, uint16x4_t b)
+{
+  uint16x4_t result;
+  __asm__ ("uhsub %0.4h, %1.4h, %2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vhsub_u32 (uint32x2_t a, uint32x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("uhsub %0.2s, %1.2s, %2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vhsubq_s8 (int8x16_t a, int8x16_t b)
+{
+  int8x16_t result;
+  __asm__ ("shsub %0.16b, %1.16b, %2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vhsubq_s16 (int16x8_t a, int16x8_t b)
+{
+  int16x8_t result;
+  __asm__ ("shsub %0.8h, %1.8h, %2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vhsubq_s32 (int32x4_t a, int32x4_t b)
+{
+  int32x4_t result;
+  __asm__ ("shsub %0.4s, %1.4s, %2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vhsubq_u8 (uint8x16_t a, uint8x16_t b)
+{
+  uint8x16_t result;
+  __asm__ ("uhsub %0.16b, %1.16b, %2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vhsubq_u16 (uint16x8_t a, uint16x8_t b)
+{
+  uint16x8_t result;
+  __asm__ ("uhsub %0.8h, %1.8h, %2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vhsubq_u32 (uint32x4_t a, uint32x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("uhsub %0.4s, %1.4s, %2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
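+/* vld1_dup_<type>: load a single element from memory and replicate it
+   into every lane of a 64-bit vector (LD1R); the one-element 64-bit
+   variants are plain LD1 loads.  */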
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vld1_dup_f32 (float32_t * a)
+{
+  float32x2_t result;
+  __asm__ ("ld1r {%0.2s},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vld1_dup_f64 (float64_t * a)
+{
+  float64x1_t result;
+  __asm__ ("ld1 {%0.1d},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vld1_dup_p8 (poly8_t * a)
+{
+  poly8x8_t result;
+  __asm__ ("ld1r {%0.8b},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vld1_dup_p16 (poly16_t * a)
+{
+  poly16x4_t result;
+  __asm__ ("ld1r {%0.4h},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vld1_dup_s8 (int8_t * a)
+{
+  int8x8_t result;
+  __asm__ ("ld1r {%0.8b},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vld1_dup_s16 (int16_t * a)
+{
+  int16x4_t result;
+  __asm__ ("ld1r {%0.4h},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vld1_dup_s32 (int32_t * a)
+{
+  int32x2_t result;
+  __asm__ ("ld1r {%0.2s},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vld1_dup_s64 (int64_t * a)
+{
+  int64x1_t result;
+  __asm__ ("ld1 {%0.1d},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vld1_dup_u8 (uint8_t * a)
+{
+  uint8x8_t result;
+  __asm__ ("ld1r {%0.8b},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vld1_dup_u16 (uint16_t * a)
+{
+  uint16x4_t result;
+  __asm__ ("ld1r {%0.4h},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vld1_dup_u32 (uint32_t * a)
+{
+  uint32x2_t result;
+  __asm__ ("ld1r {%0.2s},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vld1_dup_u64 (uint64_t * a)
+{
+  uint64x1_t result;
+  __asm__ ("ld1 {%0.1d},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
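+/* vld1_<type>: load a full 64-bit vector from memory with LD1.  The
+   "memory" clobber keeps the compiler from caching the pointed-to
+   data in registers across the asm.  */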
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vld1_f32 (float32_t * a)
+{
+  float32x2_t result;
+  __asm__ ("ld1 {%0.2s},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vld1_f64 (float64_t * a)
+{
+  float64x1_t result;
+  __asm__ ("ld1 {%0.1d},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
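+/* vld1_lane_<type>: load one element from memory into lane C of the
+   vector B, leaving the other lanes unchanged; the "0" constraint
+   ties B to the output so the untouched lanes are preserved.  */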
+#define vld1_lane_f32(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x2_t b_ = (b);                                            \
+       float32_t * a_ = (a);                                            \
+       float32x2_t result;                                              \
+       __asm__ ("ld1 {%0.s}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1_lane_f64(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x1_t b_ = (b);                                            \
+       float64_t * a_ = (a);                                            \
+       float64x1_t result;                                              \
+       __asm__ ("ld1 {%0.d}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1_lane_p8(a, b, c)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       poly8x8_t b_ = (b);                                              \
+       poly8_t * a_ = (a);                                              \
+       poly8x8_t result;                                                \
+       __asm__ ("ld1 {%0.b}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1_lane_p16(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       poly16x4_t b_ = (b);                                             \
+       poly16_t * a_ = (a);                                             \
+       poly16x4_t result;                                               \
+       __asm__ ("ld1 {%0.h}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1_lane_s8(a, b, c)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       int8x8_t b_ = (b);                                               \
+       int8_t * a_ = (a);                                               \
+       int8x8_t result;                                                 \
+       __asm__ ("ld1 {%0.b}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1_lane_s16(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x4_t b_ = (b);                                              \
+       int16_t * a_ = (a);                                              \
+       int16x4_t result;                                                \
+       __asm__ ("ld1 {%0.h}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1_lane_s32(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x2_t b_ = (b);                                              \
+       int32_t * a_ = (a);                                              \
+       int32x2_t result;                                                \
+       __asm__ ("ld1 {%0.s}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1_lane_s64(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       int64x1_t b_ = (b);                                              \
+       int64_t * a_ = (a);                                              \
+       int64x1_t result;                                                \
+       __asm__ ("ld1 {%0.d}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1_lane_u8(a, b, c)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       uint8x8_t b_ = (b);                                              \
+       uint8_t * a_ = (a);                                              \
+       uint8x8_t result;                                                \
+       __asm__ ("ld1 {%0.b}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1_lane_u16(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x4_t b_ = (b);                                             \
+       uint16_t * a_ = (a);                                             \
+       uint16x4_t result;                                               \
+       __asm__ ("ld1 {%0.h}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1_lane_u32(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x2_t b_ = (b);                                             \
+       uint32_t * a_ = (a);                                             \
+       uint32x2_t result;                                               \
+       __asm__ ("ld1 {%0.s}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1_lane_u64(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       uint64x1_t b_ = (b);                                             \
+       uint64_t * a_ = (a);                                             \
+       uint64x1_t result;                                               \
+       __asm__ ("ld1 {%0.d}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
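+/* Usage sketch for the single-lane loads above (illustrative only;
+   buffer and variable names are hypothetical): insert one element
+   from memory into lane 1 of an existing vector, leaving the other
+   lanes untouched.  The lane number must be a constant expression.
+
+     int16_t buf[4] = {10, 20, 30, 40};
+     int16x4_t v = vdup_n_s16 (0);
+     v = vld1_lane_s16 (&buf[2], v, 1);   // v = {0, 30, 0, 0}
+*/
+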
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vld1_p8 (poly8_t * a)
+{
+  poly8x8_t result;
+  __asm__ ("ld1 {%0.8b}, [%1]"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vld1_p16 (poly16_t * a)
+{
+  poly16x4_t result;
+  __asm__ ("ld1 {%0.4h}, [%1]"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vld1_s8 (int8_t * a)
+{
+  int8x8_t result;
+  __asm__ ("ld1 {%0.8b},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vld1_s16 (int16_t * a)
+{
+  int16x4_t result;
+  __asm__ ("ld1 {%0.4h},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vld1_s32 (int32_t * a)
+{
+  int32x2_t result;
+  __asm__ ("ld1 {%0.2s},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vld1_s64 (int64_t * a)
+{
+  int64x1_t result;
+  __asm__ ("ld1 {%0.1d},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vld1_u8 (uint8_t * a)
+{
+  uint8x8_t result;
+  __asm__ ("ld1 {%0.8b},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vld1_u16 (uint16_t * a)
+{
+  uint16x4_t result;
+  __asm__ ("ld1 {%0.4h},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vld1_u32 (uint32_t * a)
+{
+  uint32x2_t result;
+  __asm__ ("ld1 {%0.2s},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vld1_u64 (uint64_t * a)
+{
+  uint64x1_t result;
+  __asm__ ("ld1 {%0.1d},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
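+/* Usage sketch for the vld1_* loads above (illustrative only; names
+   are hypothetical): load four contiguous 16-bit values into a
+   64-bit D register.
+
+     int16_t buf[4] = {1, 2, 3, 4};
+     int16x4_t v = vld1_s16 (buf);        // v = {1, 2, 3, 4}
+*/
+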
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vld1q_dup_f32 (float32_t * a)
+{
+  float32x4_t result;
+  __asm__ ("ld1r {%0.4s},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vld1q_dup_f64 (float64_t * a)
+{
+  float64x2_t result;
+  __asm__ ("ld1r {%0.2d},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vld1q_dup_p8 (poly8_t * a)
+{
+  poly8x16_t result;
+  __asm__ ("ld1r {%0.16b},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vld1q_dup_p16 (poly16_t * a)
+{
+  poly16x8_t result;
+  __asm__ ("ld1r {%0.8h},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vld1q_dup_s8 (int8_t * a)
+{
+  int8x16_t result;
+  __asm__ ("ld1r {%0.16b},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vld1q_dup_s16 (int16_t * a)
+{
+  int16x8_t result;
+  __asm__ ("ld1r {%0.8h},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vld1q_dup_s32 (int32_t * a)
+{
+  int32x4_t result;
+  __asm__ ("ld1r {%0.4s},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vld1q_dup_s64 (int64_t * a)
+{
+  int64x2_t result;
+  __asm__ ("ld1r {%0.2d},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vld1q_dup_u8 (uint8_t * a)
+{
+  uint8x16_t result;
+  __asm__ ("ld1r {%0.16b},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vld1q_dup_u16 (uint16_t * a)
+{
+  uint16x8_t result;
+  __asm__ ("ld1r {%0.8h},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vld1q_dup_u32 (uint32_t * a)
+{
+  uint32x4_t result;
+  __asm__ ("ld1r {%0.4s},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vld1q_dup_u64 (uint64_t * a)
+{
+  uint64x2_t result;
+  __asm__ ("ld1r {%0.2d},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
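+/* Usage sketch for the vld1q_dup_* (LD1R) loads above (illustrative
+   only; names are hypothetical): broadcast one scalar from memory
+   into every lane of a Q register.
+
+     float32_t scale = 2.5f;
+     float32x4_t vs = vld1q_dup_f32 (&scale);   // {2.5, 2.5, 2.5, 2.5}
+*/
+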
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vld1q_f32 (float32_t * a)
+{
+  float32x4_t result;
+  __asm__ ("ld1 {%0.4s},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vld1q_f64 (float64_t * a)
+{
+  float64x2_t result;
+  __asm__ ("ld1 {%0.2d},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+#define vld1q_lane_f32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x4_t b_ = (b);                                            \
+       float32_t * a_ = (a);                                            \
+       float32x4_t result;                                              \
+       __asm__ ("ld1 {%0.s}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1q_lane_f64(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x2_t b_ = (b);                                            \
+       float64_t * a_ = (a);                                            \
+       float64x2_t result;                                              \
+       __asm__ ("ld1 {%0.d}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1q_lane_p8(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       poly8x16_t b_ = (b);                                             \
+       poly8_t * a_ = (a);                                              \
+       poly8x16_t result;                                               \
+       __asm__ ("ld1 {%0.b}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1q_lane_p16(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       poly16x8_t b_ = (b);                                             \
+       poly16_t * a_ = (a);                                             \
+       poly16x8_t result;                                               \
+       __asm__ ("ld1 {%0.h}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1q_lane_s8(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       int8x16_t b_ = (b);                                              \
+       int8_t * a_ = (a);                                               \
+       int8x16_t result;                                                \
+       __asm__ ("ld1 {%0.b}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1q_lane_s16(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t b_ = (b);                                              \
+       int16_t * a_ = (a);                                              \
+       int16x8_t result;                                                \
+       __asm__ ("ld1 {%0.h}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1q_lane_s32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t b_ = (b);                                              \
+       int32_t * a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("ld1 {%0.s}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1q_lane_s64(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       int64x2_t b_ = (b);                                              \
+       int64_t * a_ = (a);                                              \
+       int64x2_t result;                                                \
+       __asm__ ("ld1 {%0.d}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1q_lane_u8(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       uint8x16_t b_ = (b);                                             \
+       uint8_t * a_ = (a);                                              \
+       uint8x16_t result;                                               \
+       __asm__ ("ld1 {%0.b}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1q_lane_u16(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t b_ = (b);                                             \
+       uint16_t * a_ = (a);                                             \
+       uint16x8_t result;                                               \
+       __asm__ ("ld1 {%0.h}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1q_lane_u32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t b_ = (b);                                             \
+       uint32_t * a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("ld1 {%0.s}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vld1q_lane_u64(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       uint64x2_t b_ = (b);                                             \
+       uint64_t * a_ = (a);                                             \
+       uint64x2_t result;                                               \
+       __asm__ ("ld1 {%0.d}[%3],[%1]"                                   \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
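+/* Usage sketch for the vld1q_lane_* loads above (illustrative only;
+   names are hypothetical): gather two non-contiguous 64-bit values
+   into one Q register by filling each lane separately.
+
+     uint64_t lo = 1, hi = 2;
+     uint64x2_t v = vdupq_n_u64 (0);
+     v = vld1q_lane_u64 (&lo, v, 0);
+     v = vld1q_lane_u64 (&hi, v, 1);      // v = {1, 2}
+*/
+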
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vld1q_p8 (poly8_t * a)
+{
+  poly8x16_t result;
+  __asm__ ("ld1 {%0.16b},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vld1q_p16 (poly16_t * a)
+{
+  poly16x8_t result;
+  __asm__ ("ld1 {%0.8h},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vld1q_s8 (int8_t * a)
+{
+  int8x16_t result;
+  __asm__ ("ld1 {%0.16b},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vld1q_s16 (int16_t * a)
+{
+  int16x8_t result;
+  __asm__ ("ld1 {%0.8h},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vld1q_s32 (int32_t * a)
+{
+  int32x4_t result;
+  __asm__ ("ld1 {%0.4s},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vld1q_s64 (int64_t * a)
+{
+  int64x2_t result;
+  __asm__ ("ld1 {%0.2d},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vld1q_u8 (uint8_t * a)
+{
+  uint8x16_t result;
+  __asm__ ("ld1 {%0.16b},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vld1q_u16 (uint16_t * a)
+{
+  uint16x8_t result;
+  __asm__ ("ld1 {%0.8h},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vld1q_u32 (uint32_t * a)
+{
+  uint32x4_t result;
+  __asm__ ("ld1 {%0.4s},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vld1q_u64 (uint64_t * a)
+{
+  uint64x2_t result;
+  __asm__ ("ld1 {%0.2d},[%1]"
+           : "=w"(result)
+           : "r"(a)
+           : "memory");
+  return result;
+}
+
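+/* Usage sketch for the vld1q_* loads above (illustrative only; names
+   are hypothetical): load sixteen contiguous bytes into a 128-bit
+   Q register.
+
+     uint8_t buf[16] = {0};
+     uint8x16_t v = vld1q_u8 (buf);
+*/
+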
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmaxnm_f32 (float32x2_t a, float32x2_t b)
+{
+  float32x2_t result;
+  __asm__ ("fmaxnm %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmaxnmq_f32 (float32x4_t a, float32x4_t b)
+{
+  float32x4_t result;
+  __asm__ ("fmaxnm %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmaxnmq_f64 (float64x2_t a, float64x2_t b)
+{
+  float64x2_t result;
+  __asm__ ("fmaxnm %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vmaxnmvq_f32 (float32x4_t a)
+{
+  float32_t result;
+  __asm__ ("fmaxnmv %s0,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vmaxv_s8 (int8x8_t a)
+{
+  int8_t result;
+  __asm__ ("smaxv %b0,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vmaxv_s16 (int16x4_t a)
+{
+  int16_t result;
+  __asm__ ("smaxv %h0,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vmaxv_u8 (uint8x8_t a)
+{
+  uint8_t result;
+  __asm__ ("umaxv %b0,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vmaxv_u16 (uint16x4_t a)
+{
+  uint16_t result;
+  __asm__ ("umaxv %h0,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vmaxvq_f32 (float32x4_t a)
+{
+  float32_t result;
+  __asm__ ("fmaxv %s0,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vmaxvq_s8 (int8x16_t a)
+{
+  int8_t result;
+  __asm__ ("smaxv %b0,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vmaxvq_s16 (int16x8_t a)
+{
+  int16_t result;
+  __asm__ ("smaxv %h0,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vmaxvq_s32 (int32x4_t a)
+{
+  int32_t result;
+  __asm__ ("smaxv %s0,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vmaxvq_u8 (uint8x16_t a)
+{
+  uint8_t result;
+  __asm__ ("umaxv %b0,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vmaxvq_u16 (uint16x8_t a)
+{
+  uint16_t result;
+  __asm__ ("umaxv %h0,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vmaxvq_u32 (uint32x4_t a)
+{
+  uint32_t result;
+  __asm__ ("umaxv %s0,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vminnmvq_f32 (float32x4_t a)
+{
+  float32_t result;
+  __asm__ ("fminnmv %s0,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vminv_s8 (int8x8_t a)
+{
+  int8_t result;
+  __asm__ ("sminv %b0,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vminv_s16 (int16x4_t a)
+{
+  int16_t result;
+  __asm__ ("sminv %h0,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vminv_u8 (uint8x8_t a)
+{
+  uint8_t result;
+  __asm__ ("uminv %b0,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vminv_u16 (uint16x4_t a)
+{
+  uint16_t result;
+  __asm__ ("uminv %h0,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vminvq_f32 (float32x4_t a)
+{
+  float32_t result;
+  __asm__ ("fminv %s0,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vminvq_s8 (int8x16_t a)
+{
+  int8_t result;
+  __asm__ ("sminv %b0,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vminvq_s16 (int16x8_t a)
+{
+  int16_t result;
+  __asm__ ("sminv %h0,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vminvq_s32 (int32x4_t a)
+{
+  int32_t result;
+  __asm__ ("sminv %s0,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vminvq_u8 (uint8x16_t a)
+{
+  uint8_t result;
+  __asm__ ("uminv %b0,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vminvq_u16 (uint16x8_t a)
+{
+  uint16_t result;
+  __asm__ ("uminv %h0,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vminvq_u32 (uint32x4_t a)
+{
+  uint32_t result;
+  __asm__ ("uminv %s0,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
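+/* Usage sketch for the across-lane reductions above (illustrative
+   only; names are hypothetical): reduce a whole vector to a single
+   scalar maximum or minimum.
+
+     uint8_t data[16] = {0};
+     uint8x16_t v = vld1q_u8 (data);
+     uint8_t hi = vmaxvq_u8 (v);          // largest of the 16 lanes
+     uint8_t lo = vminvq_u8 (v);          // smallest of the 16 lanes
+*/
+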
+#define vmla_lane_f32(a, b, c, d)                                       \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x2_t c_ = (c);                                            \
+       float32x2_t b_ = (b);                                            \
+       float32x2_t a_ = (a);                                            \
+       float32x2_t result;                                              \
+       float32x2_t t1;                                                  \
+       __asm__ ("fmul %1.2s, %3.2s, %4.s[%5]; fadd %0.2s, %0.2s, %1.2s" \
+                : "=w"(result), "=w"(t1)                                \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmla_lane_s16(a, b, c, d)                                       \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x4_t c_ = (c);                                              \
+       int16x4_t b_ = (b);                                              \
+       int16x4_t a_ = (a);                                              \
+       int16x4_t result;                                                \
+       __asm__ ("mla %0.4h, %2.4h, %3.h[%4]"                            \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmla_lane_s32(a, b, c, d)                                       \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x2_t c_ = (c);                                              \
+       int32x2_t b_ = (b);                                              \
+       int32x2_t a_ = (a);                                              \
+       int32x2_t result;                                                \
+       __asm__ ("mla %0.2s, %2.2s, %3.s[%4]"                            \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmla_lane_u16(a, b, c, d)                                       \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x4_t c_ = (c);                                             \
+       uint16x4_t b_ = (b);                                             \
+       uint16x4_t a_ = (a);                                             \
+       uint16x4_t result;                                               \
+       __asm__ ("mla %0.4h, %2.4h, %3.h[%4]"                            \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmla_lane_u32(a, b, c, d)                                       \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x2_t c_ = (c);                                             \
+       uint32x2_t b_ = (b);                                             \
+       uint32x2_t a_ = (a);                                             \
+       uint32x2_t result;                                               \
+       __asm__ ("mla %0.2s, %2.2s, %3.s[%4]"                            \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmla_laneq_s16(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t c_ = (c);                                              \
+       int16x4_t b_ = (b);                                              \
+       int16x4_t a_ = (a);                                              \
+       int16x4_t result;                                                \
+       __asm__ ("mla %0.4h, %2.4h, %3.h[%4]"                            \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmla_laneq_s32(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t c_ = (c);                                              \
+       int32x2_t b_ = (b);                                              \
+       int32x2_t a_ = (a);                                              \
+       int32x2_t result;                                                \
+       __asm__ ("mla %0.2s, %2.2s, %3.s[%4]"                            \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmla_laneq_u16(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t c_ = (c);                                             \
+       uint16x4_t b_ = (b);                                             \
+       uint16x4_t a_ = (a);                                             \
+       uint16x4_t result;                                               \
+       __asm__ ("mla %0.4h, %2.4h, %3.h[%4]"                            \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmla_laneq_u32(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t c_ = (c);                                             \
+       uint32x2_t b_ = (b);                                             \
+       uint32x2_t a_ = (a);                                             \
+       uint32x2_t result;                                               \
+       __asm__ ("mla %0.2s, %2.2s, %3.s[%4]"                            \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmla_n_f32 (float32x2_t a, float32x2_t b, float32_t c)
+{
+  float32x2_t result;
+  float32x2_t t1;
+  __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fadd %0.2s, %0.2s, %1.2s"
+           : "=w"(result), "=w"(t1)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmla_n_s16 (int16x4_t a, int16x4_t b, int16_t c)
+{
+  int16x4_t result;
+  __asm__ ("mla %0.4h,%2.4h,%3.h[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmla_n_s32 (int32x2_t a, int32x2_t b, int32_t c)
+{
+  int32x2_t result;
+  __asm__ ("mla %0.2s,%2.2s,%3.s[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmla_n_u16 (uint16x4_t a, uint16x4_t b, uint16_t c)
+{
+  uint16x4_t result;
+  __asm__ ("mla %0.4h,%2.4h,%3.h[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmla_n_u32 (uint32x2_t a, uint32x2_t b, uint32_t c)
+{
+  uint32x2_t result;
+  __asm__ ("mla %0.2s,%2.2s,%3.s[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmla_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
+{
+  int8x8_t result;
+  __asm__ ("mla %0.8b, %2.8b, %3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmla_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
+{
+  int16x4_t result;
+  __asm__ ("mla %0.4h, %2.4h, %3.4h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmla_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
+{
+  int32x2_t result;
+  __asm__ ("mla %0.2s, %2.2s, %3.2s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmla_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
+{
+  uint8x8_t result;
+  __asm__ ("mla %0.8b, %2.8b, %3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmla_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
+{
+  uint16x4_t result;
+  __asm__ ("mla %0.4h, %2.4h, %3.4h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmla_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
+{
+  uint32x2_t result;
+  __asm__ ("mla %0.2s, %2.2s, %3.2s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
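+/* Usage sketch for the vmla_* multiply-accumulates above
+   (illustrative only; names are hypothetical): acc[i] += x[i] * y[i],
+   with the _n and _lane forms multiplying by a scalar or by one lane
+   of another vector.
+
+     int16x4_t x = vdup_n_s16 (2), y = vdup_n_s16 (5);
+     int16x4_t coeffs = vdup_n_s16 (7);
+     int16x4_t acc = vdup_n_s16 (0);
+     acc = vmla_s16 (acc, x, y);              // acc[i] += x[i] * y[i]
+     acc = vmla_n_s16 (acc, x, 3);            // acc[i] += x[i] * 3
+     acc = vmla_lane_s16 (acc, x, coeffs, 2); // acc[i] += x[i] * coeffs[2]
+*/
+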
+#define vmlal_high_lane_s16(a, b, c, d)                                 \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t c_ = (c);                                              \
+       int16x8_t b_ = (b);                                              \
+       int32x4_t a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("smlal2 %0.4s, %2.8h, %3.h[%4]"                         \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlal_high_lane_s32(a, b, c, d)                                 \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t c_ = (c);                                              \
+       int32x4_t b_ = (b);                                              \
+       int64x2_t a_ = (a);                                              \
+       int64x2_t result;                                                \
+       __asm__ ("smlal2 %0.2d, %2.4s, %3.s[%4]"                         \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlal_high_lane_u16(a, b, c, d)                                 \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t c_ = (c);                                             \
+       uint16x8_t b_ = (b);                                             \
+       uint32x4_t a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("umlal2 %0.4s, %2.8h, %3.h[%4]"                         \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlal_high_lane_u32(a, b, c, d)                                 \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t c_ = (c);                                             \
+       uint32x4_t b_ = (b);                                             \
+       uint64x2_t a_ = (a);                                             \
+       uint64x2_t result;                                               \
+       __asm__ ("umlal2 %0.2d, %2.4s, %3.s[%4]"                         \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlal_high_laneq_s16(a, b, c, d)                                \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t c_ = (c);                                              \
+       int16x8_t b_ = (b);                                              \
+       int32x4_t a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("smlal2 %0.4s, %2.8h, %3.h[%4]"                         \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlal_high_laneq_s32(a, b, c, d)                                \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t c_ = (c);                                              \
+       int32x4_t b_ = (b);                                              \
+       int64x2_t a_ = (a);                                              \
+       int64x2_t result;                                                \
+       __asm__ ("smlal2 %0.2d, %2.4s, %3.s[%4]"                         \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlal_high_laneq_u16(a, b, c, d)                                \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t c_ = (c);                                             \
+       uint16x8_t b_ = (b);                                             \
+       uint32x4_t a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("umlal2 %0.4s, %2.8h, %3.h[%4]"                         \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlal_high_laneq_u32(a, b, c, d)                                \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t c_ = (c);                                             \
+       uint32x4_t b_ = (b);                                             \
+       uint64x2_t a_ = (a);                                             \
+       uint64x2_t result;                                               \
+       __asm__ ("umlal2 %0.2d, %2.4s, %3.s[%4]"                         \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c)
+{
+  int32x4_t result;
+  __asm__ ("smlal2 %0.4s,%2.8h,%3.h[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c)
+{
+  int64x2_t result;
+  __asm__ ("smlal2 %0.2d,%2.4s,%3.s[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_high_n_u16 (uint32x4_t a, uint16x8_t b, uint16_t c)
+{
+  uint32x4_t result;
+  __asm__ ("umlal2 %0.4s,%2.8h,%3.h[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_high_n_u32 (uint64x2_t a, uint32x4_t b, uint32_t c)
+{
+  uint64x2_t result;
+  __asm__ ("umlal2 %0.2d,%2.4s,%3.s[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlal_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
+{
+  int16x8_t result;
+  __asm__ ("smlal2 %0.8h,%2.16b,%3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
+{
+  int32x4_t result;
+  __asm__ ("smlal2 %0.4s,%2.8h,%3.8h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
+{
+  int64x2_t result;
+  __asm__ ("smlal2 %0.2d,%2.4s,%3.4s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlal_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
+{
+  uint16x8_t result;
+  __asm__ ("umlal2 %0.8h,%2.16b,%3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
+{
+  uint32x4_t result;
+  __asm__ ("umlal2 %0.4s,%2.8h,%3.8h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
+{
+  uint64x2_t result;
+  __asm__ ("umlal2 %0.2d,%2.4s,%3.4s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
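+/* Usage sketch for the vmlal_high_* widening multiply-accumulates
+   above (illustrative only; names are hypothetical): multiply the
+   high halves of two Q registers and accumulate into wider lanes.
+
+     int16x8_t acc = vdupq_n_s16 (0);
+     int8x16_t x = vdupq_n_s8 (2), y = vdupq_n_s8 (3);
+     acc = vmlal_high_s8 (acc, x, y);     // each lane: 0 + 2 * 3 = 6
+*/
+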
+#define vmlal_lane_s16(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x4_t c_ = (c);                                              \
+       int16x4_t b_ = (b);                                              \
+       int32x4_t a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("smlal %0.4s,%2.4h,%3.h[%4]"                            \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlal_lane_s32(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x2_t c_ = (c);                                              \
+       int32x2_t b_ = (b);                                              \
+       int64x2_t a_ = (a);                                              \
+       int64x2_t result;                                                \
+       __asm__ ("smlal %0.2d,%2.2s,%3.s[%4]"                            \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlal_lane_u16(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x4_t c_ = (c);                                             \
+       uint16x4_t b_ = (b);                                             \
+       uint32x4_t a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("umlal %0.4s,%2.4h,%3.h[%4]"                            \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlal_lane_u32(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x2_t c_ = (c);                                             \
+       uint32x2_t b_ = (b);                                             \
+       uint64x2_t a_ = (a);                                             \
+       uint64x2_t result;                                               \
+       __asm__ ("umlal %0.2d, %2.2s, %3.s[%4]"                          \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlal_laneq_s16(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t c_ = (c);                                              \
+       int16x4_t b_ = (b);                                              \
+       int32x4_t a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("smlal %0.4s, %2.4h, %3.h[%4]"                          \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlal_laneq_s32(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t c_ = (c);                                              \
+       int32x2_t b_ = (b);                                              \
+       int64x2_t a_ = (a);                                              \
+       int64x2_t result;                                                \
+       __asm__ ("smlal %0.2d, %2.2s, %3.s[%4]"                          \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlal_laneq_u16(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t c_ = (c);                                             \
+       uint16x4_t b_ = (b);                                             \
+       uint32x4_t a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("umlal %0.4s, %2.4h, %3.h[%4]"                          \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlal_laneq_u32(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t c_ = (c);                                             \
+       uint32x2_t b_ = (b);                                             \
+       uint64x2_t a_ = (a);                                             \
+       uint64x2_t result;                                               \
+       __asm__ ("umlal %0.2d, %2.2s, %3.s[%4]"                          \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
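+/* vmlal_n: widening multiply-accumulate by a scalar; the scalar is held
+   in a SIMD register and lane 0 is used as the multiplier.  */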
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_n_s16 (int32x4_t a, int16x4_t b, int16_t c)
+{
+  int32x4_t result;
+  __asm__ ("smlal %0.4s,%2.4h,%3.h[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_n_s32 (int64x2_t a, int32x2_t b, int32_t c)
+{
+  int64x2_t result;
+  __asm__ ("smlal %0.2d,%2.2s,%3.s[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_n_u16 (uint32x4_t a, uint16x4_t b, uint16_t c)
+{
+  uint32x4_t result;
+  __asm__ ("umlal %0.4s,%2.4h,%3.h[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_n_u32 (uint64x2_t a, uint32x2_t b, uint32_t c)
+{
+  uint64x2_t result;
+  __asm__ ("umlal %0.2d,%2.2s,%3.s[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
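+/* vmlal: widening multiply-accumulate of two 64-bit vectors into a
+   128-bit accumulator (SMLAL/UMLAL), e.g. vmlal_s16 (acc, x, y) gives
+   acc[i] + (int32_t) x[i] * y[i].  */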
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlal_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
+{
+  int16x8_t result;
+  __asm__ ("smlal %0.8h,%2.8b,%3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
+{
+  int32x4_t result;
+  __asm__ ("smlal %0.4s,%2.4h,%3.4h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
+{
+  int64x2_t result;
+  __asm__ ("smlal %0.2d,%2.2s,%3.2s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlal_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
+{
+  uint16x8_t result;
+  __asm__ ("umlal %0.8h,%2.8b,%3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
+{
+  uint32x4_t result;
+  __asm__ ("umlal %0.4s,%2.4h,%3.4h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
+{
+  uint64x2_t result;
+  __asm__ ("umlal %0.2d,%2.2s,%3.2s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
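+/* vmlaq_lane/_laneq: multiply-accumulate on 128-bit vectors by a selected
+   lane.  The integer forms map to MLA; the float form is built from FMUL
+   and FADD, so the product is rounded before the accumulate.  */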
+#define vmlaq_lane_f32(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x4_t c_ = (c);                                            \
+       float32x4_t b_ = (b);                                            \
+       float32x4_t a_ = (a);                                            \
+       float32x4_t result;                                              \
+       float32x4_t t1;                                                  \
+       __asm__ ("fmul %1.4s, %3.4s, %4.s[%5]; fadd %0.4s, %0.4s, %1.4s" \
+                : "=w"(result), "=w"(t1)                                \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlaq_lane_s16(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t c_ = (c);                                              \
+       int16x8_t b_ = (b);                                              \
+       int16x8_t a_ = (a);                                              \
+       int16x8_t result;                                                \
+       __asm__ ("mla %0.8h, %2.8h, %3.h[%4]"                            \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlaq_lane_s32(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t c_ = (c);                                              \
+       int32x4_t b_ = (b);                                              \
+       int32x4_t a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("mla %0.4s, %2.4s, %3.s[%4]"                            \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlaq_lane_u16(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t c_ = (c);                                             \
+       uint16x8_t b_ = (b);                                             \
+       uint16x8_t a_ = (a);                                             \
+       uint16x8_t result;                                               \
+       __asm__ ("mla %0.8h, %2.8h, %3.h[%4]"                            \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlaq_lane_u32(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t c_ = (c);                                             \
+       uint32x4_t b_ = (b);                                             \
+       uint32x4_t a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("mla %0.4s, %2.4s, %3.s[%4]"                            \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlaq_laneq_s16(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t c_ = (c);                                              \
+       int16x8_t b_ = (b);                                              \
+       int16x8_t a_ = (a);                                              \
+       int16x8_t result;                                                \
+       __asm__ ("mla %0.8h, %2.8h, %3.h[%4]"                            \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlaq_laneq_s32(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t c_ = (c);                                              \
+       int32x4_t b_ = (b);                                              \
+       int32x4_t a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("mla %0.4s, %2.4s, %3.s[%4]"                            \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlaq_laneq_u16(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t c_ = (c);                                             \
+       uint16x8_t b_ = (b);                                             \
+       uint16x8_t a_ = (a);                                             \
+       uint16x8_t result;                                               \
+       __asm__ ("mla %0.8h, %2.8h, %3.h[%4]"                            \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlaq_laneq_u32(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t c_ = (c);                                             \
+       uint32x4_t b_ = (b);                                             \
+       uint32x4_t a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("mla %0.4s, %2.4s, %3.s[%4]"                            \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
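+/* vmlaq_n: multiply-accumulate by a scalar broadcast from lane 0; the
+   float forms again pair FMUL with FADD.  */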
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlaq_n_f32 (float32x4_t a, float32x4_t b, float32_t c)
+{
+  float32x4_t result;
+  float32x4_t t1;
+  __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fadd %0.4s, %0.4s, %1.4s"
+           : "=w"(result), "=w"(t1)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmlaq_n_f64 (float64x2_t a, float64x2_t b, float64_t c)
+{
+  float64x2_t result;
+  float64x2_t t1;
+  __asm__ ("fmul %1.2d, %3.2d, %4.d[0]; fadd %0.2d, %0.2d, %1.2d"
+           : "=w"(result), "=w"(t1)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlaq_n_s16 (int16x8_t a, int16x8_t b, int16_t c)
+{
+  int16x8_t result;
+  __asm__ ("mla %0.8h,%2.8h,%3.h[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlaq_n_s32 (int32x4_t a, int32x4_t b, int32_t c)
+{
+  int32x4_t result;
+  __asm__ ("mla %0.4s,%2.4s,%3.s[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlaq_n_u16 (uint16x8_t a, uint16x8_t b, uint16_t c)
+{
+  uint16x8_t result;
+  __asm__ ("mla %0.8h,%2.8h,%3.h[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlaq_n_u32 (uint32x4_t a, uint32x4_t b, uint32_t c)
+{
+  uint32x4_t result;
+  __asm__ ("mla %0.4s,%2.4s,%3.s[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
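+/* vmlaq: element-wise multiply-accumulate on 128-bit vectors (MLA).  */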
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmlaq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
+{
+  int8x16_t result;
+  __asm__ ("mla %0.16b, %2.16b, %3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlaq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
+{
+  int16x8_t result;
+  __asm__ ("mla %0.8h, %2.8h, %3.8h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlaq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
+{
+  int32x4_t result;
+  __asm__ ("mla %0.4s, %2.4s, %3.4s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmlaq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
+{
+  uint8x16_t result;
+  __asm__ ("mla %0.16b, %2.16b, %3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlaq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
+{
+  uint16x8_t result;
+  __asm__ ("mla %0.8h, %2.8h, %3.8h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
+{
+  uint32x4_t result;
+  __asm__ ("mla %0.4s, %2.4s, %3.4s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
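+/* vmls_lane: multiply-subtract on 64-bit vectors by a selected lane;
+   integer forms use MLS, the float form uses FMUL followed by FSUB.  */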
+#define vmls_lane_f32(a, b, c, d)                                       \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x2_t c_ = (c);                                            \
+       float32x2_t b_ = (b);                                            \
+       float32x2_t a_ = (a);                                            \
+       float32x2_t result;                                              \
+       float32x2_t t1;                                                  \
+       __asm__ ("fmul %1.2s, %3.2s, %4.s[%5]; fsub %0.2s, %0.2s, %1.2s" \
+                : "=w"(result), "=w"(t1)                                \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmls_lane_s16(a, b, c, d)                                       \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x4_t c_ = (c);                                              \
+       int16x4_t b_ = (b);                                              \
+       int16x4_t a_ = (a);                                              \
+       int16x4_t result;                                                \
+       __asm__ ("mls %0.4h,%2.4h,%3.h[%4]"                              \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmls_lane_s32(a, b, c, d)                                       \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x2_t c_ = (c);                                              \
+       int32x2_t b_ = (b);                                              \
+       int32x2_t a_ = (a);                                              \
+       int32x2_t result;                                                \
+       __asm__ ("mls %0.2s,%2.2s,%3.s[%4]"                              \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmls_lane_u16(a, b, c, d)                                       \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x4_t c_ = (c);                                             \
+       uint16x4_t b_ = (b);                                             \
+       uint16x4_t a_ = (a);                                             \
+       uint16x4_t result;                                               \
+       __asm__ ("mls %0.4h,%2.4h,%3.h[%4]"                              \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmls_lane_u32(a, b, c, d)                                       \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x2_t c_ = (c);                                             \
+       uint32x2_t b_ = (b);                                             \
+       uint32x2_t a_ = (a);                                             \
+       uint32x2_t result;                                               \
+       __asm__ ("mls %0.2s,%2.2s,%3.s[%4]"                              \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
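+/* vmls_n: multiply-subtract by a scalar held in lane 0 of a SIMD
+   register.  */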
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmls_n_f32 (float32x2_t a, float32x2_t b, float32_t c)
+{
+  float32x2_t result;
+  float32x2_t t1;
+  __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fsub %0.2s, %0.2s, %1.2s"
+           : "=w"(result), "=w"(t1)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmls_n_s16 (int16x4_t a, int16x4_t b, int16_t c)
+{
+  int16x4_t result;
+  __asm__ ("mls %0.4h, %2.4h, %3.h[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmls_n_s32 (int32x2_t a, int32x2_t b, int32_t c)
+{
+  int32x2_t result;
+  __asm__ ("mls %0.2s, %2.2s, %3.s[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmls_n_u16 (uint16x4_t a, uint16x4_t b, uint16_t c)
+{
+  uint16x4_t result;
+  __asm__ ("mls %0.4h, %2.4h, %3.h[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmls_n_u32 (uint32x2_t a, uint32x2_t b, uint32_t c)
+{
+  uint32x2_t result;
+  __asm__ ("mls %0.2s, %2.2s, %3.s[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
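+/* vmls: element-wise multiply-subtract, a[i] - b[i] * c[i], on 64-bit
+   vectors (MLS).  */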
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmls_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
+{
+  int8x8_t result;
+  __asm__ ("mls %0.8b,%2.8b,%3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmls_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
+{
+  int16x4_t result;
+  __asm__ ("mls %0.4h,%2.4h,%3.4h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmls_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
+{
+  int32x2_t result;
+  __asm__ ("mls %0.2s,%2.2s,%3.2s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmls_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
+{
+  uint8x8_t result;
+  __asm__ ("mls %0.8b,%2.8b,%3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmls_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
+{
+  uint16x4_t result;
+  __asm__ ("mls %0.4h,%2.4h,%3.4h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmls_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
+{
+  uint32x2_t result;
+  __asm__ ("mls %0.2s,%2.2s,%3.2s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
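+/* vmlsl_high_lane/_laneq: widening multiply-subtract using the high
+   halves of the 128-bit sources (SMLSL2/UMLSL2) and one lane of the
+   multiplier.  */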
+#define vmlsl_high_lane_s16(a, b, c, d)                                 \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t c_ = (c);                                              \
+       int16x8_t b_ = (b);                                              \
+       int32x4_t a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("smlsl2 %0.4s, %2.8h, %3.h[%4]"                         \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlsl_high_lane_s32(a, b, c, d)                                 \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t c_ = (c);                                              \
+       int32x4_t b_ = (b);                                              \
+       int64x2_t a_ = (a);                                              \
+       int64x2_t result;                                                \
+       __asm__ ("smlsl2 %0.2d, %2.4s, %3.s[%4]"                         \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlsl_high_lane_u16(a, b, c, d)                                 \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t c_ = (c);                                             \
+       uint16x8_t b_ = (b);                                             \
+       uint32x4_t a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("umlsl2 %0.4s, %2.8h, %3.h[%4]"                         \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlsl_high_lane_u32(a, b, c, d)                                 \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t c_ = (c);                                             \
+       uint32x4_t b_ = (b);                                             \
+       uint64x2_t a_ = (a);                                             \
+       uint64x2_t result;                                               \
+       __asm__ ("umlsl2 %0.2d, %2.4s, %3.s[%4]"                         \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlsl_high_laneq_s16(a, b, c, d)                                \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t c_ = (c);                                              \
+       int16x8_t b_ = (b);                                              \
+       int32x4_t a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("smlsl2 %0.4s, %2.8h, %3.h[%4]"                         \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlsl_high_laneq_s32(a, b, c, d)                                \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t c_ = (c);                                              \
+       int32x4_t b_ = (b);                                              \
+       int64x2_t a_ = (a);                                              \
+       int64x2_t result;                                                \
+       __asm__ ("smlsl2 %0.2d, %2.4s, %3.s[%4]"                         \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlsl_high_laneq_u16(a, b, c, d)                                \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t c_ = (c);                                             \
+       uint16x8_t b_ = (b);                                             \
+       uint32x4_t a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("umlsl2 %0.4s, %2.8h, %3.h[%4]"                         \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlsl_high_laneq_u32(a, b, c, d)                                \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t c_ = (c);                                             \
+       uint32x4_t b_ = (b);                                             \
+       uint64x2_t a_ = (a);                                             \
+       uint64x2_t result;                                               \
+       __asm__ ("umlsl2 %0.2d, %2.4s, %3.s[%4]"                         \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
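+/* vmlsl_high_n: high-half widening multiply-subtract by a scalar in
+   lane 0.  */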
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c)
+{
+  int32x4_t result;
+  __asm__ ("smlsl2 %0.4s, %2.8h, %3.h[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c)
+{
+  int64x2_t result;
+  __asm__ ("smlsl2 %0.2d, %2.4s, %3.s[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_high_n_u16 (uint32x4_t a, uint16x8_t b, uint16_t c)
+{
+  uint32x4_t result;
+  __asm__ ("umlsl2 %0.4s, %2.8h, %3.h[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_high_n_u32 (uint64x2_t a, uint32x4_t b, uint32_t c)
+{
+  uint64x2_t result;
+  __asm__ ("umlsl2 %0.2d, %2.4s, %3.s[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
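+/* vmlsl_high: widening multiply-subtract of the high halves of two
+   128-bit vectors from a 128-bit accumulator.  */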
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsl_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
+{
+  int16x8_t result;
+  __asm__ ("smlsl2 %0.8h,%2.16b,%3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
+{
+  int32x4_t result;
+  __asm__ ("smlsl2 %0.4s,%2.8h,%3.8h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
+{
+  int64x2_t result;
+  __asm__ ("smlsl2 %0.2d,%2.4s,%3.4s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsl_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
+{
+  uint16x8_t result;
+  __asm__ ("umlsl2 %0.8h,%2.16b,%3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
+{
+  uint32x4_t result;
+  __asm__ ("umlsl2 %0.4s,%2.8h,%3.8h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
+{
+  uint64x2_t result;
+  __asm__ ("umlsl2 %0.2d,%2.4s,%3.4s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
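+/* vmlsl_lane/_laneq: widening multiply-subtract by one lane of the
+   multiplier (SMLSL/UMLSL); _laneq takes the lane from a 128-bit
+   vector.  */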
+#define vmlsl_lane_s16(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x4_t c_ = (c);                                              \
+       int16x4_t b_ = (b);                                              \
+       int32x4_t a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("smlsl %0.4s, %2.4h, %3.h[%4]"                          \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlsl_lane_s32(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x2_t c_ = (c);                                              \
+       int32x2_t b_ = (b);                                              \
+       int64x2_t a_ = (a);                                              \
+       int64x2_t result;                                                \
+       __asm__ ("smlsl %0.2d, %2.2s, %3.s[%4]"                          \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlsl_lane_u16(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x4_t c_ = (c);                                             \
+       uint16x4_t b_ = (b);                                             \
+       uint32x4_t a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("umlsl %0.4s, %2.4h, %3.h[%4]"                          \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlsl_lane_u32(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x2_t c_ = (c);                                             \
+       uint32x2_t b_ = (b);                                             \
+       uint64x2_t a_ = (a);                                             \
+       uint64x2_t result;                                               \
+       __asm__ ("umlsl %0.2d, %2.2s, %3.s[%4]"                          \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlsl_laneq_s16(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t c_ = (c);                                              \
+       int16x4_t b_ = (b);                                              \
+       int32x4_t a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("smlsl %0.4s, %2.4h, %3.h[%4]"                          \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlsl_laneq_s32(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t c_ = (c);                                              \
+       int32x2_t b_ = (b);                                              \
+       int64x2_t a_ = (a);                                              \
+       int64x2_t result;                                                \
+       __asm__ ("smlsl %0.2d, %2.2s, %3.s[%4]"                          \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlsl_laneq_u16(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t c_ = (c);                                             \
+       uint16x4_t b_ = (b);                                             \
+       uint32x4_t a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("umlsl %0.4s, %2.4h, %3.h[%4]"                          \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlsl_laneq_u32(a, b, c, d)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t c_ = (c);                                             \
+       uint32x2_t b_ = (b);                                             \
+       uint64x2_t a_ = (a);                                             \
+       uint64x2_t result;                                               \
+       __asm__ ("umlsl %0.2d, %2.2s, %3.s[%4]"                          \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
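+/* vmlsl_n: widening multiply-subtract by a scalar, using lane 0.  */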
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_n_s16 (int32x4_t a, int16x4_t b, int16_t c)
+{
+  int32x4_t result;
+  __asm__ ("smlsl %0.4s, %2.4h, %3.h[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_n_s32 (int64x2_t a, int32x2_t b, int32_t c)
+{
+  int64x2_t result;
+  __asm__ ("smlsl %0.2d, %2.2s, %3.s[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_n_u16 (uint32x4_t a, uint16x4_t b, uint16_t c)
+{
+  uint32x4_t result;
+  __asm__ ("umlsl %0.4s, %2.4h, %3.h[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_n_u32 (uint64x2_t a, uint32x2_t b, uint32_t c)
+{
+  uint64x2_t result;
+  __asm__ ("umlsl %0.2d, %2.2s, %3.s[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
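+/* vmlsl: widening multiply-subtract of two 64-bit vectors from a 128-bit
+   accumulator, e.g. vmlsl_u16 (acc, x, y) gives
+   acc[i] - (uint32_t) x[i] * y[i].  */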
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsl_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
+{
+  int16x8_t result;
+  __asm__ ("smlsl %0.8h, %2.8b, %3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
+{
+  int32x4_t result;
+  __asm__ ("smlsl %0.4s, %2.4h, %3.4h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
+{
+  int64x2_t result;
+  __asm__ ("smlsl %0.2d, %2.2s, %3.2s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsl_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
+{
+  uint16x8_t result;
+  __asm__ ("umlsl %0.8h, %2.8b, %3.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
+{
+  uint32x4_t result;
+  __asm__ ("umlsl %0.4s, %2.4h, %3.4h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
+{
+  uint64x2_t result;
+  __asm__ ("umlsl %0.2d, %2.2s, %3.2s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
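+/* vmlsq_lane: multiply-subtract on 128-bit vectors by a selected lane;
+   the float form again pairs FMUL with FSUB rather than a fused
+   operation.  */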
+#define vmlsq_lane_f32(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x4_t c_ = (c);                                            \
+       float32x4_t b_ = (b);                                            \
+       float32x4_t a_ = (a);                                            \
+       float32x4_t result;                                              \
+       float32x4_t t1;                                                  \
+       __asm__ ("fmul %1.4s, %3.4s, %4.s[%5]; fsub %0.4s, %0.4s, %1.4s" \
+                : "=w"(result), "=w"(t1)                                \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlsq_lane_s16(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t c_ = (c);                                              \
+       int16x8_t b_ = (b);                                              \
+       int16x8_t a_ = (a);                                              \
+       int16x8_t result;                                                \
+       __asm__ ("mls %0.8h,%2.8h,%3.h[%4]"                              \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlsq_lane_s32(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t c_ = (c);                                              \
+       int32x4_t b_ = (b);                                              \
+       int32x4_t a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("mls %0.4s,%2.4s,%3.s[%4]"                              \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlsq_lane_u16(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t c_ = (c);                                             \
+       uint16x8_t b_ = (b);                                             \
+       uint16x8_t a_ = (a);                                             \
+       uint16x8_t result;                                               \
+       __asm__ ("mls %0.8h,%2.8h,%3.h[%4]"                              \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmlsq_lane_u32(a, b, c, d)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t c_ = (c);                                             \
+       uint32x4_t b_ = (b);                                             \
+       uint32x4_t a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("mls %0.4s,%2.4s,%3.s[%4]"                              \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "w"(c_), "i"(d)                     \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
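+/* vmlsq_laneq: as above, but the lane index selects from a 128-bit
+   multiplier vector.  */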
+#define vmlsq_laneq_f32(__a, __b, __c, __d)                            \
+  __extension__                                                                \
+    ({                                                                 \
+       float32x4_t __c_ = (__c);                                       \
+       float32x4_t __b_ = (__b);                                       \
+       float32x4_t __a_ = (__a);                                       \
+       float32x4_t __result;                                           \
+       float32x4_t __t1;                                               \
+       __asm__ ("fmul %1.4s, %3.4s, %4.s[%5]; fsub %0.4s, %0.4s, %1.4s"        \
+                : "=w"(__result), "=w"(__t1)                           \
+                : "0"(__a_), "w"(__b_), "w"(__c_), "i"(__d)            \
+                : /* No clobbers */);                                  \
+       __result;                                                       \
+     })
+
+#define vmlsq_laneq_s16(__a, __b, __c, __d)                            \
+  __extension__                                                                \
+    ({                                                                 \
+       int16x8_t __c_ = (__c);                                         \
+       int16x8_t __b_ = (__b);                                         \
+       int16x8_t __a_ = (__a);                                         \
+       int16x8_t __result;                                             \
+       __asm__ ("mls %0.8h, %2.8h, %3.h[%4]"                           \
+                : "=w"(__result)                                       \
+                : "0"(__a_), "w"(__b_), "w"(__c_), "i"(__d)            \
+                : /* No clobbers */);                                  \
+       __result;                                                       \
+     })
+
+#define vmlsq_laneq_s32(__a, __b, __c, __d)                            \
+  __extension__                                                                \
+    ({                                                                 \
+       int32x4_t __c_ = (__c);                                         \
+       int32x4_t __b_ = (__b);                                         \
+       int32x4_t __a_ = (__a);                                         \
+       int32x4_t __result;                                             \
+       __asm__ ("mls %0.4s, %2.4s, %3.s[%4]"                           \
+                : "=w"(__result)                                       \
+                : "0"(__a_), "w"(__b_), "w"(__c_), "i"(__d)            \
+                : /* No clobbers */);                                  \
+       __result;                                                       \
+     })
+
+#define vmlsq_laneq_u16(__a, __b, __c, __d)                            \
+  __extension__                                                                \
+    ({                                                                 \
+       uint16x8_t __c_ = (__c);                                                \
+       uint16x8_t __b_ = (__b);                                                \
+       uint16x8_t __a_ = (__a);                                                \
+       uint16x8_t __result;                                            \
+       __asm__ ("mls %0.8h, %2.8h, %3.h[%4]"                           \
+                : "=w"(__result)                                       \
+                : "0"(__a_), "w"(__b_), "w"(__c_), "i"(__d)            \
+                : /* No clobbers */);                                  \
+       __result;                                                       \
+     })
+
+#define vmlsq_laneq_u32(__a, __b, __c, __d)                            \
+  __extension__                                                        \
+    ({                                                                 \
+       uint32x4_t __c_ = (__c);                                        \
+       uint32x4_t __b_ = (__b);                                        \
+       uint32x4_t __a_ = (__a);                                        \
+       uint32x4_t __result;                                            \
+       __asm__ ("mls %0.4s, %2.4s, %3.s[%4]"                           \
+                : "=w"(__result)                                       \
+                : "0"(__a_), "w"(__b_), "w"(__c_), "i"(__d)            \
+                : /* No clobbers */);                                  \
+       __result;                                                       \
+     })
+
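+/* vmlsq_n_<type>: multiply-subtract by a broadcast scalar, a - b * c in
+   every element.  The float variants expand to an FMUL/FSUB pair; the
+   integer variants use a single MLS by element 0.  */
+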
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlsq_n_f32 (float32x4_t a, float32x4_t b, float32_t c)
+{
+  float32x4_t result;
+  float32x4_t t1;
+  __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fsub %0.4s, %0.4s, %1.4s"
+           : "=w"(result), "=w"(t1)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmlsq_n_f64 (float64x2_t a, float64x2_t b, float64_t c)
+{
+  float64x2_t result;
+  float64x2_t t1;
+  __asm__ ("fmul %1.2d, %3.2d, %4.d[0]; fsub %0.2d, %0.2d, %1.2d"
+           : "=w"(result), "=w"(t1)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsq_n_s16 (int16x8_t a, int16x8_t b, int16_t c)
+{
+  int16x8_t result;
+  __asm__ ("mls %0.8h, %2.8h, %3.h[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsq_n_s32 (int32x4_t a, int32x4_t b, int32_t c)
+{
+  int32x4_t result;
+  __asm__ ("mls %0.4s, %2.4s, %3.s[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsq_n_u16 (uint16x8_t a, uint16x8_t b, uint16_t c)
+{
+  uint16x8_t result;
+  __asm__ ("mls %0.8h, %2.8h, %3.h[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsq_n_u32 (uint32x4_t a, uint32x4_t b, uint32_t c)
+{
+  uint32x4_t result;
+  __asm__ ("mls %0.4s, %2.4s, %3.s[0]"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
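+/* vmlsq_<type>: element-wise multiply-subtract of full 128-bit vectors,
+   a - b * c, using the MLS instruction.  */
+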
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmlsq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
+{
+  int8x16_t result;
+  __asm__ ("mls %0.16b,%2.16b,%3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
+{
+  int16x8_t result;
+  __asm__ ("mls %0.8h,%2.8h,%3.8h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
+{
+  int32x4_t result;
+  __asm__ ("mls %0.4s,%2.4s,%3.4s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmlsq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
+{
+  uint8x16_t result;
+  __asm__ ("mls %0.16b,%2.16b,%3.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
+{
+  uint16x8_t result;
+  __asm__ ("mls %0.8h,%2.8h,%3.8h"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
+{
+  uint32x4_t result;
+  __asm__ ("mls %0.4s,%2.4s,%3.4s"
+           : "=w"(result)
+           : "0"(a), "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
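+/* vmov_n_<type>: broadcast a scalar into every element of a 64-bit
+   vector with DUP; the 64-bit element variants use INS into lane 0.  */
+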
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmov_n_f32 (float32_t a)
+{
+  float32x2_t result;
+  __asm__ ("dup %0.2s, %w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vmov_n_p8 (uint32_t a)
+{
+  poly8x8_t result;
+  __asm__ ("dup %0.8b,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vmov_n_p16 (uint32_t a)
+{
+  poly16x4_t result;
+  __asm__ ("dup %0.4h,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmov_n_s8 (int32_t a)
+{
+  int8x8_t result;
+  __asm__ ("dup %0.8b,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmov_n_s16 (int32_t a)
+{
+  int16x4_t result;
+  __asm__ ("dup %0.4h,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmov_n_s32 (int32_t a)
+{
+  int32x2_t result;
+  __asm__ ("dup %0.2s,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vmov_n_s64 (int64_t a)
+{
+  int64x1_t result;
+  __asm__ ("ins %0.d[0],%x1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmov_n_u8 (uint32_t a)
+{
+  uint8x8_t result;
+  __asm__ ("dup %0.8b,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmov_n_u16 (uint32_t a)
+{
+  uint16x4_t result;
+  __asm__ ("dup %0.4h,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmov_n_u32 (uint32_t a)
+{
+  uint32x2_t result;
+  __asm__ ("dup %0.2s,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vmov_n_u64 (uint64_t a)
+{
+  uint64x1_t result;
+  __asm__ ("ins %0.d[0],%x1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
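+/* vmovl_high_<type>: widen the high half of a 128-bit vector to
+   double-width elements (SSHLL2/USHLL2 with a zero shift).  */
+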
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmovl_high_s8 (int8x16_t a)
+{
+  int16x8_t result;
+  __asm__ ("sshll2 %0.8h,%1.16b,#0"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmovl_high_s16 (int16x8_t a)
+{
+  int32x4_t result;
+  __asm__ ("sshll2 %0.4s,%1.8h,#0"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmovl_high_s32 (int32x4_t a)
+{
+  int64x2_t result;
+  __asm__ ("sshll2 %0.2d,%1.4s,#0"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmovl_high_u8 (uint8x16_t a)
+{
+  uint16x8_t result;
+  __asm__ ("ushll2 %0.8h,%1.16b,#0"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmovl_high_u16 (uint16x8_t a)
+{
+  uint32x4_t result;
+  __asm__ ("ushll2 %0.4s,%1.8h,#0"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmovl_high_u32 (uint32x4_t a)
+{
+  uint64x2_t result;
+  __asm__ ("ushll2 %0.2d,%1.4s,#0"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
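+/* vmovl_<type>: widen each element of a 64-bit vector to double width
+   (SSHLL/USHLL with a zero shift).  */
+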
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmovl_s8 (int8x8_t a)
+{
+  int16x8_t result;
+  __asm__ ("sshll %0.8h,%1.8b,#0"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmovl_s16 (int16x4_t a)
+{
+  int32x4_t result;
+  __asm__ ("sshll %0.4s,%1.4h,#0"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmovl_s32 (int32x2_t a)
+{
+  int64x2_t result;
+  __asm__ ("sshll %0.2d,%1.2s,#0"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmovl_u8 (uint8x8_t a)
+{
+  uint16x8_t result;
+  __asm__ ("ushll %0.8h,%1.8b,#0"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmovl_u16 (uint16x4_t a)
+{
+  uint32x4_t result;
+  __asm__ ("ushll %0.4s,%1.4h,#0"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmovl_u32 (uint32x2_t a)
+{
+  uint64x2_t result;
+  __asm__ ("ushll %0.2d,%1.2s,#0"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
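+/* vmovn_high_<type>: narrow b to half-width elements and place them in
+   the high half of the result; the low half is taken from a.  The
+   result is seeded via vcombine so XTN2 only fills the upper half.  */
+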
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmovn_high_s16 (int8x8_t a, int16x8_t b)
+{
+  int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
+  __asm__ ("xtn2 %0.16b,%2.8h"
+           : "+w"(result)
+           : "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmovn_high_s32 (int16x4_t a, int32x4_t b)
+{
+  int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
+  __asm__ ("xtn2 %0.8h,%2.4s"
+           : "+w"(result)
+           : "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmovn_high_s64 (int32x2_t a, int64x2_t b)
+{
+  int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
+  __asm__ ("xtn2 %0.4s,%2.2d"
+           : "+w"(result)
+           : "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmovn_high_u16 (uint8x8_t a, uint16x8_t b)
+{
+  uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
+  __asm__ ("xtn2 %0.16b,%2.8h"
+           : "+w"(result)
+           : "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmovn_high_u32 (uint16x4_t a, uint32x4_t b)
+{
+  uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
+  __asm__ ("xtn2 %0.8h,%2.4s"
+           : "+w"(result)
+           : "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmovn_high_u64 (uint32x2_t a, uint64x2_t b)
+{
+  uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
+  __asm__ ("xtn2 %0.4s,%2.2d"
+           : "+w"(result)
+           : "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
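+/* vmovn_<type>: narrow each element to half width with XTN, keeping
+   the low-order bits.  */
+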
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmovn_s16 (int16x8_t a)
+{
+  int8x8_t result;
+  __asm__ ("xtn %0.8b,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmovn_s32 (int32x4_t a)
+{
+  int16x4_t result;
+  __asm__ ("xtn %0.4h,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmovn_s64 (int64x2_t a)
+{
+  int32x2_t result;
+  __asm__ ("xtn %0.2s,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmovn_u16 (uint16x8_t a)
+{
+  uint8x8_t result;
+  __asm__ ("xtn %0.8b,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmovn_u32 (uint32x4_t a)
+{
+  uint16x4_t result;
+  __asm__ ("xtn %0.4h,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmovn_u64 (uint64x2_t a)
+{
+  uint32x2_t result;
+  __asm__ ("xtn %0.2s,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
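+/* vmovq_n_<type>: broadcast a scalar into every element of a 128-bit
+   vector with DUP; the f64 variant is built directly as a vector
+   constant.  */
+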
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmovq_n_f32 (float32_t a)
+{
+  float32x4_t result;
+  __asm__ ("dup %0.4s, %w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmovq_n_f64 (float64_t a)
+{
+  return (float64x2_t) {a, a};
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vmovq_n_p8 (uint32_t a)
+{
+  poly8x16_t result;
+  __asm__ ("dup %0.16b,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vmovq_n_p16 (uint32_t a)
+{
+  poly16x8_t result;
+  __asm__ ("dup %0.8h,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmovq_n_s8 (int32_t a)
+{
+  int8x16_t result;
+  __asm__ ("dup %0.16b,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmovq_n_s16 (int32_t a)
+{
+  int16x8_t result;
+  __asm__ ("dup %0.8h,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmovq_n_s32 (int32_t a)
+{
+  int32x4_t result;
+  __asm__ ("dup %0.4s,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmovq_n_s64 (int64_t a)
+{
+  int64x2_t result;
+  __asm__ ("dup %0.2d,%x1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmovq_n_u8 (uint32_t a)
+{
+  uint8x16_t result;
+  __asm__ ("dup %0.16b,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmovq_n_u16 (uint32_t a)
+{
+  uint16x8_t result;
+  __asm__ ("dup %0.8h,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmovq_n_u32 (uint32_t a)
+{
+  uint32x4_t result;
+  __asm__ ("dup %0.4s,%w1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmovq_n_u64 (uint64_t a)
+{
+  uint64x2_t result;
+  __asm__ ("dup %0.2d,%x1"
+           : "=w"(result)
+           : "r"(a)
+           : /* No clobbers */);
+  return result;
+}
+
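+/* vmul_lane_<type>: multiply a 64-bit vector by one lane of another
+   64-bit vector.  Defined as macros so the lane index stays a
+   compile-time constant for the "i" asm constraint.  */
+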
+#define vmul_lane_f32(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x2_t b_ = (b);                                            \
+       float32x2_t a_ = (a);                                            \
+       float32x2_t result;                                              \
+       __asm__ ("fmul %0.2s,%1.2s,%2.s[%3]"                             \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmul_lane_s16(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x4_t b_ = (b);                                              \
+       int16x4_t a_ = (a);                                              \
+       int16x4_t result;                                                \
+       __asm__ ("mul %0.4h,%1.4h,%2.h[%3]"                              \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmul_lane_s32(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x2_t b_ = (b);                                              \
+       int32x2_t a_ = (a);                                              \
+       int32x2_t result;                                                \
+       __asm__ ("mul %0.2s,%1.2s,%2.s[%3]"                              \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmul_lane_u16(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x4_t b_ = (b);                                             \
+       uint16x4_t a_ = (a);                                             \
+       uint16x4_t result;                                               \
+       __asm__ ("mul %0.4h,%1.4h,%2.h[%3]"                              \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmul_lane_u32(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x2_t b_ = (b);                                             \
+       uint32x2_t a_ = (a);                                             \
+       uint32x2_t result;                                               \
+       __asm__ ("mul %0.2s, %1.2s, %2.s[%3]"                            \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
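+/* vmul_laneq_<type>: as vmul_lane, but the lane is selected from a
+   128-bit vector.  */
+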
+#define vmul_laneq_f32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x4_t b_ = (b);                                            \
+       float32x2_t a_ = (a);                                            \
+       float32x2_t result;                                              \
+       __asm__ ("fmul %0.2s, %1.2s, %2.s[%3]"                           \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmul_laneq_s16(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t b_ = (b);                                              \
+       int16x4_t a_ = (a);                                              \
+       int16x4_t result;                                                \
+       __asm__ ("mul %0.4h, %1.4h, %2.h[%3]"                            \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmul_laneq_s32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t b_ = (b);                                              \
+       int32x2_t a_ = (a);                                              \
+       int32x2_t result;                                                \
+       __asm__ ("mul %0.2s, %1.2s, %2.s[%3]"                            \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmul_laneq_u16(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t b_ = (b);                                             \
+       uint16x4_t a_ = (a);                                             \
+       uint16x4_t result;                                               \
+       __asm__ ("mul %0.4h, %1.4h, %2.h[%3]"                            \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmul_laneq_u32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t b_ = (b);                                             \
+       uint32x2_t a_ = (a);                                             \
+       uint32x2_t result;                                               \
+       __asm__ ("mul %0.2s, %1.2s, %2.s[%3]"                            \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
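+/* vmul_n_<type>: multiply every element of a 64-bit vector by a
+   scalar (element 0 of the scalar register).  */
+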
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmul_n_f32 (float32x2_t a, float32_t b)
+{
+  float32x2_t result;
+  __asm__ ("fmul %0.2s,%1.2s,%2.s[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmul_n_s16 (int16x4_t a, int16_t b)
+{
+  int16x4_t result;
+  __asm__ ("mul %0.4h,%1.4h,%2.h[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmul_n_s32 (int32x2_t a, int32_t b)
+{
+  int32x2_t result;
+  __asm__ ("mul %0.2s,%1.2s,%2.s[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmul_n_u16 (uint16x4_t a, uint16_t b)
+{
+  uint16x4_t result;
+  __asm__ ("mul %0.4h,%1.4h,%2.h[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmul_n_u32 (uint32x2_t a, uint32_t b)
+{
+  uint32x2_t result;
+  __asm__ ("mul %0.2s,%1.2s,%2.s[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
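+/* vmuld_lane_f64: multiply a scalar double by one lane of a
+   float64x2_t.  */
+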
+#define vmuld_lane_f64(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x2_t b_ = (b);                                            \
+       float64_t a_ = (a);                                              \
+       float64_t result;                                                \
+       __asm__ ("fmul %d0,%d1,%2.d[%3]"                                 \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
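+/* vmull_high_lane_<type> / vmull_high_laneq_<type>: widening multiply
+   of the high half of a by one lane of b (SMULL2/UMULL2).  */
+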
+#define vmull_high_lane_s16(a, b, c)                                    \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t b_ = (b);                                              \
+       int16x8_t a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("smull2 %0.4s, %1.8h, %2.h[%3]"                         \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmull_high_lane_s32(a, b, c)                                    \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t b_ = (b);                                              \
+       int32x4_t a_ = (a);                                              \
+       int64x2_t result;                                                \
+       __asm__ ("smull2 %0.2d, %1.4s, %2.s[%3]"                         \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmull_high_lane_u16(a, b, c)                                    \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t b_ = (b);                                             \
+       uint16x8_t a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("umull2 %0.4s, %1.8h, %2.h[%3]"                         \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmull_high_lane_u32(a, b, c)                                    \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t b_ = (b);                                             \
+       uint32x4_t a_ = (a);                                             \
+       uint64x2_t result;                                               \
+       __asm__ ("umull2 %0.2d, %1.4s, %2.s[%3]"                         \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmull_high_laneq_s16(a, b, c)                                   \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t b_ = (b);                                              \
+       int16x8_t a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("smull2 %0.4s, %1.8h, %2.h[%3]"                         \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmull_high_laneq_s32(a, b, c)                                   \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t b_ = (b);                                              \
+       int32x4_t a_ = (a);                                              \
+       int64x2_t result;                                                \
+       __asm__ ("smull2 %0.2d, %1.4s, %2.s[%3]"                         \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmull_high_laneq_u16(a, b, c)                                   \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t b_ = (b);                                             \
+       uint16x8_t a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("umull2 %0.4s, %1.8h, %2.h[%3]"                         \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmull_high_laneq_u32(a, b, c)                                   \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t b_ = (b);                                             \
+       uint32x4_t a_ = (a);                                             \
+       uint64x2_t result;                                               \
+       __asm__ ("umull2 %0.2d, %1.4s, %2.s[%3]"                         \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
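+/* vmull_high_n_<type>: widening multiply of the high half of a by a
+   broadcast scalar.  */
+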
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_high_n_s16 (int16x8_t a, int16_t b)
+{
+  int32x4_t result;
+  __asm__ ("smull2 %0.4s,%1.8h,%2.h[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_high_n_s32 (int32x4_t a, int32_t b)
+{
+  int64x2_t result;
+  __asm__ ("smull2 %0.2d,%1.4s,%2.s[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_high_n_u16 (uint16x8_t a, uint16_t b)
+{
+  uint32x4_t result;
+  __asm__ ("umull2 %0.4s,%1.8h,%2.h[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_high_n_u32 (uint32x4_t a, uint32_t b)
+{
+  uint64x2_t result;
+  __asm__ ("umull2 %0.2d,%1.4s,%2.s[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
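+/* vmull_high_<type>: widening multiply of the high halves of two
+   128-bit vectors (SMULL2/UMULL2, PMULL2 for polynomials).  */
+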
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vmull_high_p8 (poly8x16_t a, poly8x16_t b)
+{
+  poly16x8_t result;
+  __asm__ ("pmull2 %0.8h,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmull_high_s8 (int8x16_t a, int8x16_t b)
+{
+  int16x8_t result;
+  __asm__ ("smull2 %0.8h,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_high_s16 (int16x8_t a, int16x8_t b)
+{
+  int32x4_t result;
+  __asm__ ("smull2 %0.4s,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_high_s32 (int32x4_t a, int32x4_t b)
+{
+  int64x2_t result;
+  __asm__ ("smull2 %0.2d,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmull_high_u8 (uint8x16_t a, uint8x16_t b)
+{
+  uint16x8_t result;
+  __asm__ ("umull2 %0.8h,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_high_u16 (uint16x8_t a, uint16x8_t b)
+{
+  uint32x4_t result;
+  __asm__ ("umull2 %0.4s,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_high_u32 (uint32x4_t a, uint32x4_t b)
+{
+  uint64x2_t result;
+  __asm__ ("umull2 %0.2d,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
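+/* vmull_lane_<type> / vmull_laneq_<type>: widening multiply of a
+   64-bit vector by one lane of a 64-bit or 128-bit vector.  */
+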
+#define vmull_lane_s16(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x4_t b_ = (b);                                              \
+       int16x4_t a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("smull %0.4s,%1.4h,%2.h[%3]"                            \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmull_lane_s32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x2_t b_ = (b);                                              \
+       int32x2_t a_ = (a);                                              \
+       int64x2_t result;                                                \
+       __asm__ ("smull %0.2d,%1.2s,%2.s[%3]"                            \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmull_lane_u16(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x4_t b_ = (b);                                             \
+       uint16x4_t a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("umull %0.4s,%1.4h,%2.h[%3]"                            \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmull_lane_u32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x2_t b_ = (b);                                             \
+       uint32x2_t a_ = (a);                                             \
+       uint64x2_t result;                                               \
+       __asm__ ("umull %0.2d, %1.2s, %2.s[%3]"                          \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmull_laneq_s16(a, b, c)                                        \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t b_ = (b);                                              \
+       int16x4_t a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("smull %0.4s, %1.4h, %2.h[%3]"                          \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmull_laneq_s32(a, b, c)                                        \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t b_ = (b);                                              \
+       int32x2_t a_ = (a);                                              \
+       int64x2_t result;                                                \
+       __asm__ ("smull %0.2d, %1.2s, %2.s[%3]"                          \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmull_laneq_u16(a, b, c)                                        \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t b_ = (b);                                             \
+       uint16x4_t a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("umull %0.4s, %1.4h, %2.h[%3]"                          \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmull_laneq_u32(a, b, c)                                        \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t b_ = (b);                                             \
+       uint32x2_t a_ = (a);                                             \
+       uint64x2_t result;                                               \
+       __asm__ ("umull %0.2d, %1.2s, %2.s[%3]"                          \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
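+/* vmull_n_<type>: widening multiply of a 64-bit vector by a broadcast
+   scalar.  */
+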
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_n_s16 (int16x4_t a, int16_t b)
+{
+  int32x4_t result;
+  __asm__ ("smull %0.4s,%1.4h,%2.h[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_n_s32 (int32x2_t a, int32_t b)
+{
+  int64x2_t result;
+  __asm__ ("smull %0.2d,%1.2s,%2.s[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_n_u16 (uint16x4_t a, uint16_t b)
+{
+  uint32x4_t result;
+  __asm__ ("umull %0.4s,%1.4h,%2.h[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_n_u32 (uint32x2_t a, uint32_t b)
+{
+  uint64x2_t result;
+  __asm__ ("umull %0.2d,%1.2s,%2.s[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
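+/* vmull_<type>: widening multiply of two 64-bit vectors, producing
+   double-width elements (SMULL/UMULL, PMULL for polynomials).  */
+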
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vmull_p8 (poly8x8_t a, poly8x8_t b)
+{
+  poly16x8_t result;
+  __asm__ ("pmull %0.8h, %1.8b, %2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmull_s8 (int8x8_t a, int8x8_t b)
+{
+  int16x8_t result;
+  __asm__ ("smull %0.8h, %1.8b, %2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_s16 (int16x4_t a, int16x4_t b)
+{
+  int32x4_t result;
+  __asm__ ("smull %0.4s, %1.4h, %2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_s32 (int32x2_t a, int32x2_t b)
+{
+  int64x2_t result;
+  __asm__ ("smull %0.2d, %1.2s, %2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmull_u8 (uint8x8_t a, uint8x8_t b)
+{
+  uint16x8_t result;
+  __asm__ ("umull %0.8h, %1.8b, %2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_u16 (uint16x4_t a, uint16x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("umull %0.4s, %1.4h, %2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_u32 (uint32x2_t a, uint32x2_t b)
+{
+  uint64x2_t result;
+  __asm__ ("umull %0.2d, %1.2s, %2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
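+/* vmulq_lane_<type> / vmulq_laneq_<type>: multiply a 128-bit vector by
+   one lane of a 64-bit or 128-bit vector.  */
+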
+#define vmulq_lane_f32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x2_t b_ = (b);                                            \
+       float32x4_t a_ = (a);                                            \
+       float32x4_t result;                                              \
+       __asm__ ("fmul %0.4s, %1.4s, %2.s[%3]"                           \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmulq_lane_f64(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x1_t b_ = (b);                                            \
+       float64x2_t a_ = (a);                                            \
+       float64x2_t result;                                              \
+       __asm__ ("fmul %0.2d,%1.2d,%2.d[%3]"                             \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmulq_lane_s16(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x4_t b_ = (b);                                              \
+       int16x8_t a_ = (a);                                              \
+       int16x8_t result;                                                \
+       __asm__ ("mul %0.8h,%1.8h,%2.h[%3]"                              \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmulq_lane_s32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x2_t b_ = (b);                                              \
+       int32x4_t a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("mul %0.4s,%1.4s,%2.s[%3]"                              \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmulq_lane_u16(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x4_t b_ = (b);                                             \
+       uint16x8_t a_ = (a);                                             \
+       uint16x8_t result;                                               \
+       __asm__ ("mul %0.8h,%1.8h,%2.h[%3]"                              \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmulq_lane_u32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x2_t b_ = (b);                                             \
+       uint32x4_t a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("mul %0.4s, %1.4s, %2.s[%3]"                            \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmulq_laneq_f32(a, b, c)                                        \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x4_t b_ = (b);                                            \
+       float32x4_t a_ = (a);                                            \
+       float32x4_t result;                                              \
+       __asm__ ("fmul %0.4s, %1.4s, %2.s[%3]"                           \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmulq_laneq_f64(a, b, c)                                        \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x2_t b_ = (b);                                            \
+       float64x2_t a_ = (a);                                            \
+       float64x2_t result;                                              \
+       __asm__ ("fmul %0.2d,%1.2d,%2.d[%3]"                             \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmulq_laneq_s16(a, b, c)                                        \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t b_ = (b);                                              \
+       int16x8_t a_ = (a);                                              \
+       int16x8_t result;                                                \
+       __asm__ ("mul %0.8h, %1.8h, %2.h[%3]"                            \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmulq_laneq_s32(a, b, c)                                        \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t b_ = (b);                                              \
+       int32x4_t a_ = (a);                                              \
+       int32x4_t result;                                                \
+       __asm__ ("mul %0.4s, %1.4s, %2.s[%3]"                            \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmulq_laneq_u16(a, b, c)                                        \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t b_ = (b);                                             \
+       uint16x8_t a_ = (a);                                             \
+       uint16x8_t result;                                               \
+       __asm__ ("mul %0.8h, %1.8h, %2.h[%3]"                            \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmulq_laneq_u32(a, b, c)                                        \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t b_ = (b);                                             \
+       uint32x4_t a_ = (a);                                             \
+       uint32x4_t result;                                               \
+       __asm__ ("mul %0.4s, %1.4s, %2.s[%3]"                            \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
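+/* vmulq_n_<type>: multiply every lane of A by the scalar B, using the
+   by-element form with lane 0 of the register holding B.  */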
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulq_n_f32 (float32x4_t a, float32_t b)
+{
+  float32x4_t result;
+  __asm__ ("fmul %0.4s,%1.4s,%2.s[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmulq_n_f64 (float64x2_t a, float64_t b)
+{
+  float64x2_t result;
+  __asm__ ("fmul %0.2d,%1.2d,%2.d[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmulq_n_s16 (int16x8_t a, int16_t b)
+{
+  int16x8_t result;
+  __asm__ ("mul %0.8h,%1.8h,%2.h[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmulq_n_s32 (int32x4_t a, int32_t b)
+{
+  int32x4_t result;
+  __asm__ ("mul %0.4s,%1.4s,%2.s[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmulq_n_u16 (uint16x8_t a, uint16_t b)
+{
+  uint16x8_t result;
+  __asm__ ("mul %0.8h,%1.8h,%2.h[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmulq_n_u32 (uint32x4_t a, uint32_t b)
+{
+  uint32x4_t result;
+  __asm__ ("mul %0.4s,%1.4s,%2.s[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
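+/* vmuls_lane_f32: scalar multiply of A by lane C of B.  Note that this
+   implementation takes the lane source B as a float32x4_t.  */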
+#define vmuls_lane_f32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x4_t b_ = (b);                                            \
+       float32_t a_ = (a);                                              \
+       float32_t result;                                                \
+       __asm__ ("fmul %s0,%s1,%2.s[%3]"                                 \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
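+/* vmulx family: floating-point multiply extended (FMULX), which returns
+   2.0 with the appropriate sign instead of a NaN when one operand is
+   zero and the other is infinity.  The _lane_ macros below index into a
+   128-bit lane-source operand.  */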
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmulx_f32 (float32x2_t a, float32x2_t b)
+{
+  float32x2_t result;
+  __asm__ ("fmulx %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+#define vmulx_lane_f32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x4_t b_ = (b);                                            \
+       float32x2_t a_ = (a);                                            \
+       float32x2_t result;                                              \
+       __asm__ ("fmulx %0.2s,%1.2s,%2.s[%3]"                            \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vmulxd_f64 (float64_t a, float64_t b)
+{
+  float64_t result;
+  __asm__ ("fmulx %d0, %d1, %d2"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulxq_f32 (float32x4_t a, float32x4_t b)
+{
+  float32x4_t result;
+  __asm__ ("fmulx %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmulxq_f64 (float64x2_t a, float64x2_t b)
+{
+  float64x2_t result;
+  __asm__ ("fmulx %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+#define vmulxq_lane_f32(a, b, c)                                        \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x4_t b_ = (b);                                            \
+       float32x4_t a_ = (a);                                            \
+       float32x4_t result;                                              \
+       __asm__ ("fmulx %0.4s,%1.4s,%2.s[%3]"                            \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vmulxq_lane_f64(a, b, c)                                        \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x2_t b_ = (b);                                            \
+       float64x2_t a_ = (a);                                            \
+       float64x2_t result;                                              \
+       __asm__ ("fmulx %0.2d,%1.2d,%2.d[%3]"                            \
+                : "=w"(result)                                          \
+                : "w"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vmulxs_f32 (float32_t a, float32_t b)
+{
+  float32_t result;
+  __asm__ ("fmulx %s0, %s1, %s2"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
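+/* vmvn_<type>/vmvnq_<type>: bitwise NOT of every lane (MVN operates on
+   the whole 64-bit or 128-bit register, so the .8b/.16b form serves all
+   element widths).  */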
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vmvn_p8 (poly8x8_t a)
+{
+  poly8x8_t result;
+  __asm__ ("mvn %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmvn_s8 (int8x8_t a)
+{
+  int8x8_t result;
+  __asm__ ("mvn %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmvn_s16 (int16x4_t a)
+{
+  int16x4_t result;
+  __asm__ ("mvn %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmvn_s32 (int32x2_t a)
+{
+  int32x2_t result;
+  __asm__ ("mvn %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmvn_u8 (uint8x8_t a)
+{
+  uint8x8_t result;
+  __asm__ ("mvn %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmvn_u16 (uint16x4_t a)
+{
+  uint16x4_t result;
+  __asm__ ("mvn %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmvn_u32 (uint32x2_t a)
+{
+  uint32x2_t result;
+  __asm__ ("mvn %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vmvnq_p8 (poly8x16_t a)
+{
+  poly8x16_t result;
+  __asm__ ("mvn %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmvnq_s8 (int8x16_t a)
+{
+  int8x16_t result;
+  __asm__ ("mvn %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmvnq_s16 (int16x8_t a)
+{
+  int16x8_t result;
+  __asm__ ("mvn %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmvnq_s32 (int32x4_t a)
+{
+  int32x4_t result;
+  __asm__ ("mvn %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmvnq_u8 (uint8x16_t a)
+{
+  uint8x16_t result;
+  __asm__ ("mvn %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmvnq_u16 (uint16x8_t a)
+{
+  uint16x8_t result;
+  __asm__ ("mvn %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmvnq_u32 (uint32x4_t a)
+{
+  uint32x4_t result;
+  __asm__ ("mvn %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
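+/* vneg_<type>/vnegq_<type>: negate each lane (FNEG for floating-point,
+   NEG for signed integer vectors).  */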
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vneg_f32 (float32x2_t a)
+{
+  float32x2_t result;
+  __asm__ ("fneg %0.2s,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vneg_s8 (int8x8_t a)
+{
+  int8x8_t result;
+  __asm__ ("neg %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vneg_s16 (int16x4_t a)
+{
+  int16x4_t result;
+  __asm__ ("neg %0.4h,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vneg_s32 (int32x2_t a)
+{
+  int32x2_t result;
+  __asm__ ("neg %0.2s,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vnegq_f32 (float32x4_t a)
+{
+  float32x4_t result;
+  __asm__ ("fneg %0.4s,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vnegq_f64 (float64x2_t a)
+{
+  float64x2_t result;
+  __asm__ ("fneg %0.2d,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vnegq_s8 (int8x16_t a)
+{
+  int8x16_t result;
+  __asm__ ("neg %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vnegq_s16 (int16x8_t a)
+{
+  int16x8_t result;
+  __asm__ ("neg %0.8h,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vnegq_s32 (int32x4_t a)
+{
+  int32x4_t result;
+  __asm__ ("neg %0.4s,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vnegq_s64 (int64x2_t a)
+{
+  int64x2_t result;
+  __asm__ ("neg %0.2d,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
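+/* vpadal_<type>/vpadalq_<type>: pairwise add adjacent lanes of B,
+   widening, and accumulate into A (SADALP/UADALP); the accumulator is
+   tied to the output via the "0" constraint.  */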
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpadal_s8 (int16x4_t a, int8x8_t b)
+{
+  int16x4_t result;
+  __asm__ ("sadalp %0.4h,%2.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpadal_s16 (int32x2_t a, int16x4_t b)
+{
+  int32x2_t result;
+  __asm__ ("sadalp %0.2s,%2.4h"
+           : "=w"(result)
+           : "0"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vpadal_s32 (int64x1_t a, int32x2_t b)
+{
+  int64x1_t result;
+  __asm__ ("sadalp %0.1d,%2.2s"
+           : "=w"(result)
+           : "0"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpadal_u8 (uint16x4_t a, uint8x8_t b)
+{
+  uint16x4_t result;
+  __asm__ ("uadalp %0.4h,%2.8b"
+           : "=w"(result)
+           : "0"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpadal_u16 (uint32x2_t a, uint16x4_t b)
+{
+  uint32x2_t result;
+  __asm__ ("uadalp %0.2s,%2.4h"
+           : "=w"(result)
+           : "0"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vpadal_u32 (uint64x1_t a, uint32x2_t b)
+{
+  uint64x1_t result;
+  __asm__ ("uadalp %0.1d,%2.2s"
+           : "=w"(result)
+           : "0"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpadalq_s8 (int16x8_t a, int8x16_t b)
+{
+  int16x8_t result;
+  __asm__ ("sadalp %0.8h,%2.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpadalq_s16 (int32x4_t a, int16x8_t b)
+{
+  int32x4_t result;
+  __asm__ ("sadalp %0.4s,%2.8h"
+           : "=w"(result)
+           : "0"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vpadalq_s32 (int64x2_t a, int32x4_t b)
+{
+  int64x2_t result;
+  __asm__ ("sadalp %0.2d,%2.4s"
+           : "=w"(result)
+           : "0"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpadalq_u8 (uint16x8_t a, uint8x16_t b)
+{
+  uint16x8_t result;
+  __asm__ ("uadalp %0.8h,%2.16b"
+           : "=w"(result)
+           : "0"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpadalq_u16 (uint32x4_t a, uint16x8_t b)
+{
+  uint32x4_t result;
+  __asm__ ("uadalp %0.4s,%2.8h"
+           : "=w"(result)
+           : "0"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vpadalq_u32 (uint64x2_t a, uint32x4_t b)
+{
+  uint64x2_t result;
+  __asm__ ("uadalp %0.2d,%2.4s"
+           : "=w"(result)
+           : "0"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
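+/* vpadd_<type>: pairwise add of adjacent lanes of A followed by adjacent
+   lanes of B.  The f32 form uses FADDP inline asm; the integer forms go
+   through the __builtin_aarch64_addp builtins.  vpaddd_f64 below reduces
+   a float64x2_t to the scalar sum of its two lanes.  */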
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpadd_f32 (float32x2_t a, float32x2_t b)
+{
+  float32x2_t result;
+  __asm__ ("faddp %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vpadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return __builtin_aarch64_addpv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return __builtin_aarch64_addpv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return __builtin_aarch64_addpv2si (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vpadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_addpv8qi ((int8x8_t) __a,
+                                                (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_addpv4hi ((int16x4_t) __a,
+                                                 (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_addpv2si ((int32x2_t) __a,
+                                                 (int32x2_t) __b);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vpaddd_f64 (float64x2_t a)
+{
+  float64_t result;
+  __asm__ ("faddp %d0,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
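+/* vpaddl_<type>/vpaddlq_<type>: pairwise add adjacent lanes, widening
+   the result to the next element size (SADDLP/UADDLP).  */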
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpaddl_s8 (int8x8_t a)
+{
+  int16x4_t result;
+  __asm__ ("saddlp %0.4h,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpaddl_s16 (int16x4_t a)
+{
+  int32x2_t result;
+  __asm__ ("saddlp %0.2s,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vpaddl_s32 (int32x2_t a)
+{
+  int64x1_t result;
+  __asm__ ("saddlp %0.1d,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpaddl_u8 (uint8x8_t a)
+{
+  uint16x4_t result;
+  __asm__ ("uaddlp %0.4h,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpaddl_u16 (uint16x4_t a)
+{
+  uint32x2_t result;
+  __asm__ ("uaddlp %0.2s,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vpaddl_u32 (uint32x2_t a)
+{
+  uint64x1_t result;
+  __asm__ ("uaddlp %0.1d,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpaddlq_s8 (int8x16_t a)
+{
+  int16x8_t result;
+  __asm__ ("saddlp %0.8h,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpaddlq_s16 (int16x8_t a)
+{
+  int32x4_t result;
+  __asm__ ("saddlp %0.4s,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vpaddlq_s32 (int32x4_t a)
+{
+  int64x2_t result;
+  __asm__ ("saddlp %0.2d,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpaddlq_u8 (uint8x16_t a)
+{
+  uint16x8_t result;
+  __asm__ ("uaddlp %0.8h,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpaddlq_u16 (uint16x8_t a)
+{
+  uint32x4_t result;
+  __asm__ ("uaddlp %0.4s,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vpaddlq_u32 (uint32x4_t a)
+{
+  uint64x2_t result;
+  __asm__ ("uaddlp %0.2d,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
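+/* vpaddq_<type>: 128-bit pairwise add (ADDP/FADDP); vpadds_f32 below
+   reduces a float32x2_t to the scalar sum of its two lanes.  */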
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vpaddq_f32 (float32x4_t a, float32x4_t b)
+{
+  float32x4_t result;
+  __asm__ ("faddp %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vpaddq_f64 (float64x2_t a, float64x2_t b)
+{
+  float64x2_t result;
+  __asm__ ("faddp %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vpaddq_s8 (int8x16_t a, int8x16_t b)
+{
+  int8x16_t result;
+  __asm__ ("addp %0.16b,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpaddq_s16 (int16x8_t a, int16x8_t b)
+{
+  int16x8_t result;
+  __asm__ ("addp %0.8h,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpaddq_s32 (int32x4_t a, int32x4_t b)
+{
+  int32x4_t result;
+  __asm__ ("addp %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vpaddq_s64 (int64x2_t a, int64x2_t b)
+{
+  int64x2_t result;
+  __asm__ ("addp %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vpaddq_u8 (uint8x16_t a, uint8x16_t b)
+{
+  uint8x16_t result;
+  __asm__ ("addp %0.16b,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpaddq_u16 (uint16x8_t a, uint16x8_t b)
+{
+  uint16x8_t result;
+  __asm__ ("addp %0.8h,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpaddq_u32 (uint32x4_t a, uint32x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("addp %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vpaddq_u64 (uint64x2_t a, uint64x2_t b)
+{
+  uint64x2_t result;
+  __asm__ ("addp %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vpadds_f32 (float32x2_t a)
+{
+  float32_t result;
+  __asm__ ("faddp %s0,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
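+/* Pairwise maximum: vpmax_<type> (64-bit, SMAXP/UMAXP/FMAXP), the
+   vpmaxnm* forms with IEEE maxNum NaN handling (FMAXNMP), the 128-bit
+   vpmaxq_<type> forms, and the scalar reductions vpmaxqd_f64 and
+   vpmaxs_f32.  */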
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpmax_f32 (float32x2_t a, float32x2_t b)
+{
+  float32x2_t result;
+  __asm__ ("fmaxp %0.2s, %1.2s, %2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vpmax_s8 (int8x8_t a, int8x8_t b)
+{
+  int8x8_t result;
+  __asm__ ("smaxp %0.8b, %1.8b, %2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpmax_s16 (int16x4_t a, int16x4_t b)
+{
+  int16x4_t result;
+  __asm__ ("smaxp %0.4h, %1.4h, %2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpmax_s32 (int32x2_t a, int32x2_t b)
+{
+  int32x2_t result;
+  __asm__ ("smaxp %0.2s, %1.2s, %2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vpmax_u8 (uint8x8_t a, uint8x8_t b)
+{
+  uint8x8_t result;
+  __asm__ ("umaxp %0.8b, %1.8b, %2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpmax_u16 (uint16x4_t a, uint16x4_t b)
+{
+  uint16x4_t result;
+  __asm__ ("umaxp %0.4h, %1.4h, %2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpmax_u32 (uint32x2_t a, uint32x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("umaxp %0.2s, %1.2s, %2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpmaxnm_f32 (float32x2_t a, float32x2_t b)
+{
+  float32x2_t result;
+  __asm__ ("fmaxnmp %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vpmaxnmq_f32 (float32x4_t a, float32x4_t b)
+{
+  float32x4_t result;
+  __asm__ ("fmaxnmp %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vpmaxnmq_f64 (float64x2_t a, float64x2_t b)
+{
+  float64x2_t result;
+  __asm__ ("fmaxnmp %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vpmaxnmqd_f64 (float64x2_t a)
+{
+  float64_t result;
+  __asm__ ("fmaxnmp %d0,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vpmaxnms_f32 (float32x2_t a)
+{
+  float32_t result;
+  __asm__ ("fmaxnmp %s0,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vpmaxq_f32 (float32x4_t a, float32x4_t b)
+{
+  float32x4_t result;
+  __asm__ ("fmaxp %0.4s, %1.4s, %2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vpmaxq_f64 (float64x2_t a, float64x2_t b)
+{
+  float64x2_t result;
+  __asm__ ("fmaxp %0.2d, %1.2d, %2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vpmaxq_s8 (int8x16_t a, int8x16_t b)
+{
+  int8x16_t result;
+  __asm__ ("smaxp %0.16b, %1.16b, %2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpmaxq_s16 (int16x8_t a, int16x8_t b)
+{
+  int16x8_t result;
+  __asm__ ("smaxp %0.8h, %1.8h, %2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpmaxq_s32 (int32x4_t a, int32x4_t b)
+{
+  int32x4_t result;
+  __asm__ ("smaxp %0.4s, %1.4s, %2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vpmaxq_u8 (uint8x16_t a, uint8x16_t b)
+{
+  uint8x16_t result;
+  __asm__ ("umaxp %0.16b, %1.16b, %2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpmaxq_u16 (uint16x8_t a, uint16x8_t b)
+{
+  uint16x8_t result;
+  __asm__ ("umaxp %0.8h, %1.8h, %2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpmaxq_u32 (uint32x4_t a, uint32x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("umaxp %0.4s, %1.4s, %2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vpmaxqd_f64 (float64x2_t a)
+{
+  float64_t result;
+  __asm__ ("fmaxp %d0,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vpmaxs_f32 (float32x2_t a)
+{
+  float32_t result;
+  __asm__ ("fmaxp %s0,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
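+/* Pairwise minimum: vpmin_<type> (SMINP/UMINP/FMINP), the vpminnm*
+   forms with IEEE minNum NaN handling (FMINNMP), the 128-bit
+   vpminq_<type> forms, and the scalar reductions vpminqd_f64 and
+   vpmins_f32.  */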
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpmin_f32 (float32x2_t a, float32x2_t b)
+{
+  float32x2_t result;
+  __asm__ ("fminp %0.2s, %1.2s, %2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vpmin_s8 (int8x8_t a, int8x8_t b)
+{
+  int8x8_t result;
+  __asm__ ("sminp %0.8b, %1.8b, %2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpmin_s16 (int16x4_t a, int16x4_t b)
+{
+  int16x4_t result;
+  __asm__ ("sminp %0.4h, %1.4h, %2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpmin_s32 (int32x2_t a, int32x2_t b)
+{
+  int32x2_t result;
+  __asm__ ("sminp %0.2s, %1.2s, %2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vpmin_u8 (uint8x8_t a, uint8x8_t b)
+{
+  uint8x8_t result;
+  __asm__ ("uminp %0.8b, %1.8b, %2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpmin_u16 (uint16x4_t a, uint16x4_t b)
+{
+  uint16x4_t result;
+  __asm__ ("uminp %0.4h, %1.4h, %2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpmin_u32 (uint32x2_t a, uint32x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("uminp %0.2s, %1.2s, %2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpminnm_f32 (float32x2_t a, float32x2_t b)
+{
+  float32x2_t result;
+  __asm__ ("fminnmp %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vpminnmq_f32 (float32x4_t a, float32x4_t b)
+{
+  float32x4_t result;
+  __asm__ ("fminnmp %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vpminnmq_f64 (float64x2_t a, float64x2_t b)
+{
+  float64x2_t result;
+  __asm__ ("fminnmp %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vpminnmqd_f64 (float64x2_t a)
+{
+  float64_t result;
+  __asm__ ("fminnmp %d0,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vpminnms_f32 (float32x2_t a)
+{
+  float32_t result;
+  __asm__ ("fminnmp %s0,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vpminq_f32 (float32x4_t a, float32x4_t b)
+{
+  float32x4_t result;
+  __asm__ ("fminp %0.4s, %1.4s, %2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vpminq_f64 (float64x2_t a, float64x2_t b)
+{
+  float64x2_t result;
+  __asm__ ("fminp %0.2d, %1.2d, %2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vpminq_s8 (int8x16_t a, int8x16_t b)
+{
+  int8x16_t result;
+  __asm__ ("sminp %0.16b, %1.16b, %2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpminq_s16 (int16x8_t a, int16x8_t b)
+{
+  int16x8_t result;
+  __asm__ ("sminp %0.8h, %1.8h, %2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpminq_s32 (int32x4_t a, int32x4_t b)
+{
+  int32x4_t result;
+  __asm__ ("sminp %0.4s, %1.4s, %2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vpminq_u8 (uint8x16_t a, uint8x16_t b)
+{
+  uint8x16_t result;
+  __asm__ ("uminp %0.16b, %1.16b, %2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpminq_u16 (uint16x8_t a, uint16x8_t b)
+{
+  uint16x8_t result;
+  __asm__ ("uminp %0.8h, %1.8h, %2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpminq_u32 (uint32x4_t a, uint32x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("uminp %0.4s, %1.4s, %2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vpminqd_f64 (float64x2_t a)
+{
+  float64_t result;
+  __asm__ ("fminp %d0,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vpmins_f32 (float32x2_t a)
+{
+  float32_t result;
+  __asm__ ("fminp %s0,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
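+/* vqdmulh_n_<type>/vqdmulhq_n_<type>: saturating doubling multiply
+   returning the high half, with the scalar B broadcast via lane 0
+   (SQDMULH by element).  */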
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_n_s16 (int16x4_t a, int16_t b)
+{
+  int16x4_t result;
+  __asm__ ("sqdmulh %0.4h,%1.4h,%2.h[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_n_s32 (int32x2_t a, int32_t b)
+{
+  int32x2_t result;
+  __asm__ ("sqdmulh %0.2s,%1.2s,%2.s[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_n_s16 (int16x8_t a, int16_t b)
+{
+  int16x8_t result;
+  __asm__ ("sqdmulh %0.8h,%1.8h,%2.h[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_n_s32 (int32x4_t a, int32_t b)
+{
+  int32x4_t result;
+  __asm__ ("sqdmulh %0.4s,%1.4s,%2.s[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
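+/* vqmovn_high_<type>/vqmovun_high_<type>: saturating narrow of B into
+   the upper half of the result (SQXTN2/UQXTN2/SQXTUN2); the low half is
+   seeded from A with vcombine, e.g. (illustrative)
+     uint8x16_t r = vqmovn_high_u16 (lo, wide);  */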
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqmovn_high_s16 (int8x8_t a, int16x8_t b)
+{
+  int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
+  __asm__ ("sqxtn2 %0.16b, %2.8h"
+           : "+w"(result)
+           : "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqmovn_high_s32 (int16x4_t a, int32x4_t b)
+{
+  int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
+  __asm__ ("sqxtn2 %0.8h, %2.4s"
+           : "+w"(result)
+           : "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqmovn_high_s64 (int32x2_t a, int64x2_t b)
+{
+  int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
+  __asm__ ("sqxtn2 %0.4s, %2.2d"
+           : "+w"(result)
+           : "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqmovn_high_u16 (uint8x8_t a, uint16x8_t b)
+{
+  uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
+  __asm__ ("uqxtn2 %0.16b, %2.8h"
+           : "+w"(result)
+           : "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqmovn_high_u32 (uint16x4_t a, uint32x4_t b)
+{
+  uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
+  __asm__ ("uqxtn2 %0.8h, %2.4s"
+           : "+w"(result)
+           : "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqmovn_high_u64 (uint32x2_t a, uint64x2_t b)
+{
+  uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
+  __asm__ ("uqxtn2 %0.4s, %2.2d"
+           : "+w"(result)
+           : "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqmovun_high_s16 (uint8x8_t a, int16x8_t b)
+{
+  uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
+  __asm__ ("sqxtun2 %0.16b, %2.8h"
+           : "+w"(result)
+           : "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqmovun_high_s32 (uint16x4_t a, int32x4_t b)
+{
+  uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
+  __asm__ ("sqxtun2 %0.8h, %2.4s"
+           : "+w"(result)
+           : "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqmovun_high_s64 (uint32x2_t a, int64x2_t b)
+{
+  uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
+  __asm__ ("sqxtun2 %0.4s, %2.2d"
+           : "+w"(result)
+           : "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
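+/* vqrdmulh_n_<type>/vqrdmulhq_n_<type>: as vqdmulh_n above but with
+   rounding (SQRDMULH by element).  */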
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_n_s16 (int16x4_t a, int16_t b)
+{
+  int16x4_t result;
+  __asm__ ("sqrdmulh %0.4h,%1.4h,%2.h[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_n_s32 (int32x2_t a, int32_t b)
+{
+  int32x2_t result;
+  __asm__ ("sqrdmulh %0.2s,%1.2s,%2.s[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_n_s16 (int16x8_t a, int16_t b)
+{
+  int16x8_t result;
+  __asm__ ("sqrdmulh %0.8h,%1.8h,%2.h[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
+{
+  int32x4_t result;
+  __asm__ ("sqrdmulh %0.4s,%1.4s,%2.s[0]"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
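+/* vqrshrn_high_n_<type>/vqrshrun_high_n_<type>: saturating rounding
+   shift right by C and narrow B into the upper half of the result
+   (SQRSHRN2/UQRSHRN2/SQRSHRUN2), keeping A as the low half.  */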
+#define vqrshrn_high_n_s16(a, b, c)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t b_ = (b);                                              \
+       int8x8_t a_ = (a);                                               \
+       int8x16_t result = vcombine_s8                                   \
+                            (a_, vcreate_s8 (UINT64_C (0x0)));          \
+       __asm__ ("sqrshrn2 %0.16b, %1.8h, #%2"                           \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vqrshrn_high_n_s32(a, b, c)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t b_ = (b);                                              \
+       int16x4_t a_ = (a);                                              \
+       int16x8_t result = vcombine_s16                                  \
+                            (a_, vcreate_s16 (UINT64_C (0x0)));         \
+       __asm__ ("sqrshrn2 %0.8h, %1.4s, #%2"                            \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vqrshrn_high_n_s64(a, b, c)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       int64x2_t b_ = (b);                                              \
+       int32x2_t a_ = (a);                                              \
+       int32x4_t result = vcombine_s32                                  \
+                            (a_, vcreate_s32 (UINT64_C (0x0)));         \
+       __asm__ ("sqrshrn2 %0.4s, %1.2d, #%2"                            \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vqrshrn_high_n_u16(a, b, c)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t b_ = (b);                                             \
+       uint8x8_t a_ = (a);                                              \
+       uint8x16_t result = vcombine_u8                                  \
+                             (a_, vcreate_u8 (UINT64_C (0x0)));         \
+       __asm__ ("uqrshrn2 %0.16b, %1.8h, #%2"                           \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vqrshrn_high_n_u32(a, b, c)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t b_ = (b);                                             \
+       uint16x4_t a_ = (a);                                             \
+       uint16x8_t result = vcombine_u16                                 \
+                             (a_, vcreate_u16 (UINT64_C (0x0)));        \
+       __asm__ ("uqrshrn2 %0.8h, %1.4s, #%2"                            \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vqrshrn_high_n_u64(a, b, c)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       uint64x2_t b_ = (b);                                             \
+       uint32x2_t a_ = (a);                                             \
+       uint32x4_t result = vcombine_u32                                 \
+                             (a_, vcreate_u32 (UINT64_C (0x0)));        \
+       __asm__ ("uqrshrn2 %0.4s, %1.2d, #%2"                            \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vqrshrun_high_n_s16(a, b, c)                                    \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t b_ = (b);                                              \
+       uint8x8_t a_ = (a);                                              \
+       uint8x16_t result = vcombine_u8                                  \
+                             (a_, vcreate_u8 (UINT64_C (0x0)));         \
+       __asm__ ("sqrshrun2 %0.16b, %1.8h, #%2"                          \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vqrshrun_high_n_s32(a, b, c)                                    \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t b_ = (b);                                              \
+       uint16x4_t a_ = (a);                                             \
+       uint16x8_t result = vcombine_u16                                 \
+                             (a_, vcreate_u16 (UINT64_C (0x0)));        \
+       __asm__ ("sqrshrun2 %0.8h, %1.4s, #%2"                           \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vqrshrun_high_n_s64(a, b, c)                                    \
+  __extension__                                                         \
+    ({                                                                  \
+       int64x2_t b_ = (b);                                              \
+       uint32x2_t a_ = (a);                                             \
+       uint32x4_t result = vcombine_u32                                 \
+                             (a_, vcreate_u32 (UINT64_C (0x0)));        \
+       __asm__ ("sqrshrun2 %0.4s, %1.2d, #%2"                           \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
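+/* vqshrn_high_n_<type>/vqshrun_high_n_<type>: as above but without
+   rounding (SQSHRN2/UQSHRN2/SQSHRUN2).  */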
+#define vqshrn_high_n_s16(a, b, c)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t b_ = (b);                                              \
+       int8x8_t a_ = (a);                                               \
+       int8x16_t result = vcombine_s8                                   \
+                            (a_, vcreate_s8 (UINT64_C (0x0)));          \
+       __asm__ ("sqshrn2 %0.16b, %1.8h, #%2"                            \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vqshrn_high_n_s32(a, b, c)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t b_ = (b);                                              \
+       int16x4_t a_ = (a);                                              \
+       int16x8_t result = vcombine_s16                                  \
+                            (a_, vcreate_s16 (UINT64_C (0x0)));         \
+       __asm__ ("sqshrn2 %0.8h, %1.4s, #%2"                             \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vqshrn_high_n_s64(a, b, c)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       int64x2_t b_ = (b);                                              \
+       int32x2_t a_ = (a);                                              \
+       int32x4_t result = vcombine_s32                                  \
+                            (a_, vcreate_s32 (UINT64_C (0x0)));         \
+       __asm__ ("sqshrn2 %0.4s, %1.2d, #%2"                             \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vqshrn_high_n_u16(a, b, c)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t b_ = (b);                                             \
+       uint8x8_t a_ = (a);                                              \
+       uint8x16_t result = vcombine_u8                                  \
+                             (a_, vcreate_u8 (UINT64_C (0x0)));         \
+       __asm__ ("uqshrn2 %0.16b, %1.8h, #%2"                            \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vqshrn_high_n_u32(a, b, c)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t b_ = (b);                                             \
+       uint16x4_t a_ = (a);                                             \
+       uint16x8_t result = vcombine_u16                                 \
+                             (a_, vcreate_u16 (UINT64_C (0x0)));        \
+       __asm__ ("uqshrn2 %0.8h, %1.4s, #%2"                             \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vqshrn_high_n_u64(a, b, c)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       uint64x2_t b_ = (b);                                             \
+       uint32x2_t a_ = (a);                                             \
+       uint32x4_t result = vcombine_u32                                 \
+                             (a_, vcreate_u32 (UINT64_C (0x0)));        \
+       __asm__ ("uqshrn2 %0.4s, %1.2d, #%2"                             \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
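+/* vqshrun_high_n_s{16,32,64} (a, b, n): signed saturating shift right
+   unsigned narrow into the high half (SQSHRUN2).  Each lane of 'b' is
+   shifted right by the constant 'n', saturated to the unsigned range
+   and written to the upper half of the result; the lower half is
+   copied from 'a'.  Illustrative use, with hypothetical operands:
+
+     uint8x16_t v = vqshrun_high_n_s16 (lo8, wide16, 4);
+
+   where 'lo8' is a uint8x8_t of previously narrowed lanes and
+   'wide16' is the int16x8_t still to be narrowed.  */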
+#define vqshrun_high_n_s16(a, b, c)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t b_ = (b);                                              \
+       uint8x8_t a_ = (a);                                              \
+       uint8x16_t result = vcombine_u8                                  \
+                             (a_, vcreate_u8 (UINT64_C (0x0)));         \
+       __asm__ ("sqshrun2 %0.16b, %1.8h, #%2"                           \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vqshrun_high_n_s32(a, b, c)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t b_ = (b);                                              \
+       uint16x4_t a_ = (a);                                             \
+       uint16x8_t result = vcombine_u16                                 \
+                             (a_, vcreate_u16 (UINT64_C (0x0)));        \
+       __asm__ ("sqshrun2 %0.8h, %1.4s, #%2"                            \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vqshrun_high_n_s64(a, b, c)                                     \
+  __extension__                                                         \
+    ({                                                                  \
+       int64x2_t b_ = (b);                                              \
+       uint32x2_t a_ = (a);                                             \
+       uint32x4_t result = vcombine_u32                                 \
+                             (a_, vcreate_u32 (UINT64_C (0x0)));        \
+       __asm__ ("sqshrun2 %0.4s, %1.2d, #%2"                            \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
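+/* vrbit{,q}_{s8,u8}: reverse the order of the bits within every byte
+   of the operand (RBIT on the vector registers).  For example, a byte
+   holding 0x01 becomes 0x80.  */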
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrbit_s8 (int8x8_t a)
+{
+  int8x8_t result;
+  __asm__ ("rbit %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrbit_u8 (uint8x8_t a)
+{
+  uint8x8_t result;
+  __asm__ ("rbit %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrbitq_s8 (int8x16_t a)
+{
+  int8x16_t result;
+  __asm__ ("rbit %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrbitq_u8 (uint8x16_t a)
+{
+  uint8x16_t result;
+  __asm__ ("rbit %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
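+/* Reciprocal support: vrecpe* give an initial reciprocal estimate
+   (FRECPE, or URECPE for the unsigned u32 forms), vrecps* perform one
+   Newton-Raphson step (FRECPS computes 2 - a*b), and vrecpx* return
+   the reciprocal exponent (FRECPX).  A minimal refinement sketch
+   (illustrative only; 'x' stands for any float32x2_t input):
+
+     float32x2_t r = vrecpe_f32 (x);
+     r = vmul_f32 (r, vrecps_f32 (x, r));   /- one refinement step -/
+     r = vmul_f32 (r, vrecps_f32 (x, r));   /- and another -/
+  */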
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrecpe_f32 (float32x2_t a)
+{
+  float32x2_t result;
+  __asm__ ("frecpe %0.2s,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrecpe_u32 (uint32x2_t a)
+{
+  uint32x2_t result;
+  __asm__ ("urecpe %0.2s,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vrecped_f64 (float64_t a)
+{
+  float64_t result;
+  __asm__ ("frecpe %d0,%d1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrecpeq_f32 (float32x4_t a)
+{
+  float32x4_t result;
+  __asm__ ("frecpe %0.4s,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrecpeq_f64 (float64x2_t a)
+{
+  float64x2_t result;
+  __asm__ ("frecpe %0.2d,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrecpeq_u32 (uint32x4_t a)
+{
+  uint32x4_t result;
+  __asm__ ("urecpe %0.4s,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vrecpes_f32 (float32_t a)
+{
+  float32_t result;
+  __asm__ ("frecpe %s0,%s1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrecps_f32 (float32x2_t a, float32x2_t b)
+{
+  float32x2_t result;
+  __asm__ ("frecps %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vrecpsd_f64 (float64_t a, float64_t b)
+{
+  float64_t result;
+  __asm__ ("frecps %d0,%d1,%d2"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrecpsq_f32 (float32x4_t a, float32x4_t b)
+{
+  float32x4_t result;
+  __asm__ ("frecps %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrecpsq_f64 (float64x2_t a, float64x2_t b)
+{
+  float64x2_t result;
+  __asm__ ("frecps %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vrecpss_f32 (float32_t a, float32_t b)
+{
+  float32_t result;
+  __asm__ ("frecps %s0,%s1,%s2"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vrecpxd_f64 (float64_t a)
+{
+  float64_t result;
+  __asm__ ("frecpe %d0,%d1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vrecpxs_f32 (float32_t a)
+{
+  float32_t result;
+  __asm__ ("frecpe %s0,%s1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
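+/* vrev16/vrev32/vrev64 (and the *q forms): reverse the order of the
+   elements inside every 16-, 32- or 64-bit container of the vector
+   (REV16/REV32/REV64).  E.g. vrev64_s16 reverses the four 16-bit
+   lanes within each 64-bit doubleword.  */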
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev16_p8 (poly8x8_t a)
+{
+  poly8x8_t result;
+  __asm__ ("rev16 %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev16_s8 (int8x8_t a)
+{
+  int8x8_t result;
+  __asm__ ("rev16 %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev16_u8 (uint8x8_t a)
+{
+  uint8x8_t result;
+  __asm__ ("rev16 %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev16q_p8 (poly8x16_t a)
+{
+  poly8x16_t result;
+  __asm__ ("rev16 %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev16q_s8 (int8x16_t a)
+{
+  int8x16_t result;
+  __asm__ ("rev16 %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev16q_u8 (uint8x16_t a)
+{
+  uint8x16_t result;
+  __asm__ ("rev16 %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev32_p8 (poly8x8_t a)
+{
+  poly8x8_t result;
+  __asm__ ("rev32 %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vrev32_p16 (poly16x4_t a)
+{
+  poly16x4_t result;
+  __asm__ ("rev32 %0.4h,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev32_s8 (int8x8_t a)
+{
+  int8x8_t result;
+  __asm__ ("rev32 %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrev32_s16 (int16x4_t a)
+{
+  int16x4_t result;
+  __asm__ ("rev32 %0.4h,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev32_u8 (uint8x8_t a)
+{
+  uint8x8_t result;
+  __asm__ ("rev32 %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrev32_u16 (uint16x4_t a)
+{
+  uint16x4_t result;
+  __asm__ ("rev32 %0.4h,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev32q_p8 (poly8x16_t a)
+{
+  poly8x16_t result;
+  __asm__ ("rev32 %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vrev32q_p16 (poly16x8_t a)
+{
+  poly16x8_t result;
+  __asm__ ("rev32 %0.8h,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev32q_s8 (int8x16_t a)
+{
+  int8x16_t result;
+  __asm__ ("rev32 %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrev32q_s16 (int16x8_t a)
+{
+  int16x8_t result;
+  __asm__ ("rev32 %0.8h,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev32q_u8 (uint8x16_t a)
+{
+  uint8x16_t result;
+  __asm__ ("rev32 %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrev32q_u16 (uint16x8_t a)
+{
+  uint16x8_t result;
+  __asm__ ("rev32 %0.8h,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrev64_f32 (float32x2_t a)
+{
+  float32x2_t result;
+  __asm__ ("rev64 %0.2s,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev64_p8 (poly8x8_t a)
+{
+  poly8x8_t result;
+  __asm__ ("rev64 %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vrev64_p16 (poly16x4_t a)
+{
+  poly16x4_t result;
+  __asm__ ("rev64 %0.4h,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev64_s8 (int8x8_t a)
+{
+  int8x8_t result;
+  __asm__ ("rev64 %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrev64_s16 (int16x4_t a)
+{
+  int16x4_t result;
+  __asm__ ("rev64 %0.4h,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrev64_s32 (int32x2_t a)
+{
+  int32x2_t result;
+  __asm__ ("rev64 %0.2s,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev64_u8 (uint8x8_t a)
+{
+  uint8x8_t result;
+  __asm__ ("rev64 %0.8b,%1.8b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrev64_u16 (uint16x4_t a)
+{
+  uint16x4_t result;
+  __asm__ ("rev64 %0.4h,%1.4h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrev64_u32 (uint32x2_t a)
+{
+  uint32x2_t result;
+  __asm__ ("rev64 %0.2s,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrev64q_f32 (float32x4_t a)
+{
+  float32x4_t result;
+  __asm__ ("rev64 %0.4s,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev64q_p8 (poly8x16_t a)
+{
+  poly8x16_t result;
+  __asm__ ("rev64 %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vrev64q_p16 (poly16x8_t a)
+{
+  poly16x8_t result;
+  __asm__ ("rev64 %0.8h,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev64q_s8 (int8x16_t a)
+{
+  int8x16_t result;
+  __asm__ ("rev64 %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrev64q_s16 (int16x8_t a)
+{
+  int16x8_t result;
+  __asm__ ("rev64 %0.8h,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrev64q_s32 (int32x4_t a)
+{
+  int32x4_t result;
+  __asm__ ("rev64 %0.4s,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev64q_u8 (uint8x16_t a)
+{
+  uint8x16_t result;
+  __asm__ ("rev64 %0.16b,%1.16b"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrev64q_u16 (uint16x8_t a)
+{
+  uint16x8_t result;
+  __asm__ ("rev64 %0.8h,%1.8h"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrev64q_u32 (uint32x4_t a)
+{
+  uint32x4_t result;
+  __asm__ ("rev64 %0.4s,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
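+/* Vector round-to-integral in floating point.  The suffix selects the
+   FRINT rounding mode: vrnd* -> FRINTZ (toward zero), vrnda* ->
+   FRINTA (to nearest, ties away from zero), vrndm* -> FRINTM (toward
+   minus infinity), vrndn* -> FRINTN (to nearest, ties to even) and
+   vrndp* -> FRINTP (toward plus infinity).  */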
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrnd_f32 (float32x2_t a)
+{
+  float32x2_t result;
+  __asm__ ("frintz %0.2s,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrnda_f32 (float32x2_t a)
+{
+  float32x2_t result;
+  __asm__ ("frinta %0.2s,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrndm_f32 (float32x2_t a)
+{
+  float32x2_t result;
+  __asm__ ("frintm %0.2s,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrndn_f32 (float32x2_t a)
+{
+  float32x2_t result;
+  __asm__ ("frintn %0.2s,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrndp_f32 (float32x2_t a)
+{
+  float32x2_t result;
+  __asm__ ("frintp %0.2s,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndq_f32 (float32x4_t a)
+{
+  float32x4_t result;
+  __asm__ ("frintz %0.4s,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrndq_f64 (float64x2_t a)
+{
+  float64x2_t result;
+  __asm__ ("frintz %0.2d,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndqa_f32 (float32x4_t a)
+{
+  float32x4_t result;
+  __asm__ ("frinta %0.4s,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrndqa_f64 (float64x2_t a)
+{
+  float64x2_t result;
+  __asm__ ("frinta %0.2d,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndqm_f32 (float32x4_t a)
+{
+  float32x4_t result;
+  __asm__ ("frintm %0.4s,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrndqm_f64 (float64x2_t a)
+{
+  float64x2_t result;
+  __asm__ ("frintm %0.2d,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndqn_f32 (float32x4_t a)
+{
+  float32x4_t result;
+  __asm__ ("frintn %0.4s,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrndqn_f64 (float64x2_t a)
+{
+  float64x2_t result;
+  __asm__ ("frintn %0.2d,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndqp_f32 (float32x4_t a)
+{
+  float32x4_t result;
+  __asm__ ("frintp %0.4s,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrndqp_f64 (float64x2_t a)
+{
+  float64x2_t result;
+  __asm__ ("frintp %0.2d,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
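+/* vrshrn_high_n_*: rounding shift right narrow into the high half
+   (RSHRN2).  Each lane of 'b' is shifted right by the constant 'c'
+   with rounding and narrowed; the result lands in the upper half of
+   the returned vector, with the lower half copied from 'a'.  The
+   plain vrshrn_n_* forms below narrow into a half-width vector
+   instead.  */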
+#define vrshrn_high_n_s16(a, b, c)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t b_ = (b);                                              \
+       int8x8_t a_ = (a);                                               \
+       int8x16_t result = vcombine_s8                                   \
+                            (a_, vcreate_s8 (UINT64_C (0x0)));          \
+       __asm__ ("rshrn2 %0.16b,%1.8h,#%2"                               \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vrshrn_high_n_s32(a, b, c)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t b_ = (b);                                              \
+       int16x4_t a_ = (a);                                              \
+       int16x8_t result = vcombine_s16                                  \
+                            (a_, vcreate_s16 (UINT64_C (0x0)));         \
+       __asm__ ("rshrn2 %0.8h,%1.4s,#%2"                                \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vrshrn_high_n_s64(a, b, c)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       int64x2_t b_ = (b);                                              \
+       int32x2_t a_ = (a);                                              \
+       int32x4_t result = vcombine_s32                                  \
+                            (a_, vcreate_s32 (UINT64_C (0x0)));         \
+       __asm__ ("rshrn2 %0.4s,%1.2d,#%2"                                \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vrshrn_high_n_u16(a, b, c)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t b_ = (b);                                             \
+       uint8x8_t a_ = (a);                                              \
+       uint8x16_t result = vcombine_u8                                  \
+                            (a_, vcreate_u8 (UINT64_C (0x0)));          \
+       __asm__ ("rshrn2 %0.16b,%1.8h,#%2"                               \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vrshrn_high_n_u32(a, b, c)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t b_ = (b);                                             \
+       uint16x4_t a_ = (a);                                             \
+       uint16x8_t result = vcombine_u16                                 \
+                            (a_, vcreate_u16 (UINT64_C (0x0)));         \
+       __asm__ ("rshrn2 %0.8h,%1.4s,#%2"                                \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vrshrn_high_n_u64(a, b, c)                                      \
+  __extension__                                                         \
+    ({                                                                  \
+       uint64x2_t b_ = (b);                                             \
+       uint32x2_t a_ = (a);                                             \
+       uint32x4_t result = vcombine_u32                                 \
+                            (a_, vcreate_u32 (UINT64_C (0x0)));         \
+       __asm__ ("rshrn2 %0.4s,%1.2d,#%2"                                \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
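+/* vrshrn_n_*: rounding shift right narrow (RSHRN).  Illustrative use,
+   with a hypothetical operand:
+
+     int8x8_t narrow = vrshrn_n_s16 (wide, 3);
+
+   which, per lane, computes (wide + 4) >> 3 and keeps the low 8 bits
+   of the result.  */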
+#define vrshrn_n_s16(a, b)                                              \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t a_ = (a);                                              \
+       int8x8_t result;                                                 \
+       __asm__ ("rshrn %0.8b,%1.8h,%2"                                  \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vrshrn_n_s32(a, b)                                              \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t a_ = (a);                                              \
+       int16x4_t result;                                                \
+       __asm__ ("rshrn %0.4h,%1.4s,%2"                                  \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vrshrn_n_s64(a, b)                                              \
+  __extension__                                                         \
+    ({                                                                  \
+       int64x2_t a_ = (a);                                              \
+       int32x2_t result;                                                \
+       __asm__ ("rshrn %0.2s,%1.2d,%2"                                  \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vrshrn_n_u16(a, b)                                              \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t a_ = (a);                                             \
+       uint8x8_t result;                                                \
+       __asm__ ("rshrn %0.8b,%1.8h,%2"                                  \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vrshrn_n_u32(a, b)                                              \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t a_ = (a);                                             \
+       uint16x4_t result;                                               \
+       __asm__ ("rshrn %0.4h,%1.4s,%2"                                  \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vrshrn_n_u64(a, b)                                              \
+  __extension__                                                         \
+    ({                                                                  \
+       uint64x2_t a_ = (a);                                             \
+       uint32x2_t result;                                               \
+       __asm__ ("rshrn %0.2s,%1.2d,%2"                                  \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
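+/* Reciprocal square-root support: vrsqrte* give the initial estimate
+   (FRSQRTE, or URSQRTE for the unsigned u32 forms) and vrsqrts*
+   perform one Newton-Raphson step (FRSQRTS computes (3 - a*b) / 2).
+   A minimal refinement sketch (illustrative only; 'x' stands for any
+   float32x2_t input):
+
+     float32x2_t e = vrsqrte_f32 (x);
+     e = vmul_f32 (e, vrsqrts_f32 (vmul_f32 (x, e), e));
+     e = vmul_f32 (e, vrsqrts_f32 (vmul_f32 (x, e), e));
+  */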
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrsqrte_f32 (float32x2_t a)
+{
+  float32x2_t result;
+  __asm__ ("frsqrte %0.2s,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrsqrte_f64 (float64x2_t a)
+{
+  float64x2_t result;
+  __asm__ ("frsqrte %0.2d,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrsqrte_u32 (uint32x2_t a)
+{
+  uint32x2_t result;
+  __asm__ ("ursqrte %0.2s,%1.2s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vrsqrted_f64 (float64_t a)
+{
+  float64_t result;
+  __asm__ ("frsqrte %d0,%d1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrsqrteq_f32 (float32x4_t a)
+{
+  float32x4_t result;
+  __asm__ ("frsqrte %0.4s,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrsqrteq_f64 (float64x2_t a)
+{
+  float64x2_t result;
+  __asm__ ("frsqrte %0.2d,%1.2d"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrsqrteq_u32 (uint32x4_t a)
+{
+  uint32x4_t result;
+  __asm__ ("ursqrte %0.4s,%1.4s"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vrsqrtes_f32 (float32_t a)
+{
+  float32_t result;
+  __asm__ ("frsqrte %s0,%s1"
+           : "=w"(result)
+           : "w"(a)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrsqrts_f32 (float32x2_t a, float32x2_t b)
+{
+  float32x2_t result;
+  __asm__ ("frsqrts %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vrsqrtsd_f64 (float64_t a, float64_t b)
+{
+  float64_t result;
+  __asm__ ("frsqrts %d0,%d1,%d2"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrsqrtsq_f32 (float32x4_t a, float32x4_t b)
+{
+  float32x4_t result;
+  __asm__ ("frsqrts %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrsqrtsq_f64 (float64x2_t a, float64x2_t b)
+{
+  float64x2_t result;
+  __asm__ ("frsqrts %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vrsqrtss_f32 (float32_t a, float32_t b)
+{
+  float32_t result;
+  __asm__ ("frsqrts %s0,%s1,%s2"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
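+/* vrsubhn_* and vrsubhn_high_*: rounding subtract, returning the high
+   (most significant) half of each difference, narrowed (RSUBHN /
+   RSUBHN2).  The _high_ forms write into the upper half of a full
+   vector whose lower half is taken from the first argument.  */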
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrsubhn_high_s16 (int8x8_t a, int16x8_t b, int16x8_t c)
+{
+  int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
+  __asm__ ("rsubhn2 %0.16b, %1.8h, %2.8h"
+           : "+w"(result)
+           : "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrsubhn_high_s32 (int16x4_t a, int32x4_t b, int32x4_t c)
+{
+  int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
+  __asm__ ("rsubhn2 %0.8h, %1.4s, %2.4s"
+           : "+w"(result)
+           : "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrsubhn_high_s64 (int32x2_t a, int64x2_t b, int64x2_t c)
+{
+  int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
+  __asm__ ("rsubhn2 %0.4s, %1.2d, %2.2d"
+           : "+w"(result)
+           : "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrsubhn_high_u16 (uint8x8_t a, uint16x8_t b, uint16x8_t c)
+{
+  uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
+  __asm__ ("rsubhn2 %0.16b, %1.8h, %2.8h"
+           : "+w"(result)
+           : "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrsubhn_high_u32 (uint16x4_t a, uint32x4_t b, uint32x4_t c)
+{
+  uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
+  __asm__ ("rsubhn2 %0.8h, %1.4s, %2.4s"
+           : "+w"(result)
+           : "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrsubhn_high_u64 (uint32x2_t a, uint64x2_t b, uint64x2_t c)
+{
+  uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
+  __asm__ ("rsubhn2 %0.4s, %1.2d, %2.2d"
+           : "+w"(result)
+           : "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrsubhn_s16 (int16x8_t a, int16x8_t b)
+{
+  int8x8_t result;
+  __asm__ ("rsubhn %0.8b, %1.8h, %2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrsubhn_s32 (int32x4_t a, int32x4_t b)
+{
+  int16x4_t result;
+  __asm__ ("rsubhn %0.4h, %1.4s, %2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrsubhn_s64 (int64x2_t a, int64x2_t b)
+{
+  int32x2_t result;
+  __asm__ ("rsubhn %0.2s, %1.2d, %2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrsubhn_u16 (uint16x8_t a, uint16x8_t b)
+{
+  uint8x8_t result;
+  __asm__ ("rsubhn %0.8b, %1.8h, %2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrsubhn_u32 (uint32x4_t a, uint32x4_t b)
+{
+  uint16x4_t result;
+  __asm__ ("rsubhn %0.4h, %1.4s, %2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrsubhn_u64 (uint64x2_t a, uint64x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("rsubhn %0.2s, %1.2d, %2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
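+/* vset_lane_* (and the vsetq_lane_* quad-register forms below):
+   insert the scalar 'a' into lane 'c' of vector 'b' and return the
+   updated vector (INS from a general register).  'c' must be a
+   constant lane index.  Illustrative, with a hypothetical operand:
+
+     int32x2_t v2 = vset_lane_s32 (42, v, 1);
+
+   which returns 'v' with lane 1 replaced by 42.  */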
+#define vset_lane_f32(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x2_t b_ = (b);                                            \
+       float32_t a_ = (a);                                              \
+       float32x2_t result;                                              \
+       __asm__ ("ins %0.s[%3], %w1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vset_lane_f64(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x1_t b_ = (b);                                            \
+       float64_t a_ = (a);                                              \
+       float64x1_t result;                                              \
+       __asm__ ("ins %0.d[%3], %x1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vset_lane_p8(a, b, c)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       poly8x8_t b_ = (b);                                              \
+       poly8_t a_ = (a);                                                \
+       poly8x8_t result;                                                \
+       __asm__ ("ins %0.b[%3], %w1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vset_lane_p16(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       poly16x4_t b_ = (b);                                             \
+       poly16_t a_ = (a);                                               \
+       poly16x4_t result;                                               \
+       __asm__ ("ins %0.h[%3], %w1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vset_lane_s8(a, b, c)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       int8x8_t b_ = (b);                                               \
+       int8_t a_ = (a);                                                 \
+       int8x8_t result;                                                 \
+       __asm__ ("ins %0.b[%3], %w1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vset_lane_s16(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x4_t b_ = (b);                                              \
+       int16_t a_ = (a);                                                \
+       int16x4_t result;                                                \
+       __asm__ ("ins %0.h[%3], %w1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vset_lane_s32(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x2_t b_ = (b);                                              \
+       int32_t a_ = (a);                                                \
+       int32x2_t result;                                                \
+       __asm__ ("ins %0.s[%3], %w1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vset_lane_s64(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       int64x1_t b_ = (b);                                              \
+       int64_t a_ = (a);                                                \
+       int64x1_t result;                                                \
+       __asm__ ("ins %0.d[%3], %x1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vset_lane_u8(a, b, c)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       uint8x8_t b_ = (b);                                              \
+       uint8_t a_ = (a);                                                \
+       uint8x8_t result;                                                \
+       __asm__ ("ins %0.b[%3], %w1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vset_lane_u16(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x4_t b_ = (b);                                             \
+       uint16_t a_ = (a);                                               \
+       uint16x4_t result;                                               \
+       __asm__ ("ins %0.h[%3], %w1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vset_lane_u32(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x2_t b_ = (b);                                             \
+       uint32_t a_ = (a);                                               \
+       uint32x2_t result;                                               \
+       __asm__ ("ins %0.s[%3], %w1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vset_lane_u64(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       uint64x1_t b_ = (b);                                             \
+       uint64_t a_ = (a);                                               \
+       uint64x1_t result;                                               \
+       __asm__ ("ins %0.d[%3], %x1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vsetq_lane_f32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x4_t b_ = (b);                                            \
+       float32_t a_ = (a);                                              \
+       float32x4_t result;                                              \
+       __asm__ ("ins %0.s[%3], %w1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vsetq_lane_f64(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x2_t b_ = (b);                                            \
+       float64_t a_ = (a);                                              \
+       float64x2_t result;                                              \
+       __asm__ ("ins %0.d[%3], %x1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vsetq_lane_p8(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       poly8x16_t b_ = (b);                                             \
+       poly8_t a_ = (a);                                                \
+       poly8x16_t result;                                               \
+       __asm__ ("ins %0.b[%3], %w1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vsetq_lane_p16(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       poly16x8_t b_ = (b);                                             \
+       poly16_t a_ = (a);                                               \
+       poly16x8_t result;                                               \
+       __asm__ ("ins %0.h[%3], %w1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vsetq_lane_s8(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       int8x16_t b_ = (b);                                              \
+       int8_t a_ = (a);                                                 \
+       int8x16_t result;                                                \
+       __asm__ ("ins %0.b[%3], %w1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vsetq_lane_s16(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t b_ = (b);                                              \
+       int16_t a_ = (a);                                                \
+       int16x8_t result;                                                \
+       __asm__ ("ins %0.h[%3], %w1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vsetq_lane_s32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t b_ = (b);                                              \
+       int32_t a_ = (a);                                                \
+       int32x4_t result;                                                \
+       __asm__ ("ins %0.s[%3], %w1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vsetq_lane_s64(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       int64x2_t b_ = (b);                                              \
+       int64_t a_ = (a);                                                \
+       int64x2_t result;                                                \
+       __asm__ ("ins %0.d[%3], %x1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vsetq_lane_u8(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       uint8x16_t b_ = (b);                                             \
+       uint8_t a_ = (a);                                                \
+       uint8x16_t result;                                               \
+       __asm__ ("ins %0.b[%3], %w1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vsetq_lane_u16(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t b_ = (b);                                             \
+       uint16_t a_ = (a);                                               \
+       uint16x8_t result;                                               \
+       __asm__ ("ins %0.h[%3], %w1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vsetq_lane_u32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t b_ = (b);                                             \
+       uint32_t a_ = (a);                                               \
+       uint32x4_t result;                                               \
+       __asm__ ("ins %0.s[%3], %w1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vsetq_lane_u64(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       uint64x2_t b_ = (b);                                             \
+       uint64_t a_ = (a);                                               \
+       uint64x2_t result;                                               \
+       __asm__ ("ins %0.d[%3], %x1"                                     \
+                : "=w"(result)                                          \
+                : "r"(a_), "0"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
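+/* vshrn_high_n: shift each element of the 128-bit source right by the
+   immediate, narrow to half the element width, and place the result in
+   the high half of the destination; the low half is taken from the
+   first argument (SHRN2).  */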
+#define vshrn_high_n_s16(a, b, c)                                       \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t b_ = (b);                                              \
+       int8x8_t a_ = (a);                                               \
+       int8x16_t result = vcombine_s8                                   \
+                            (a_, vcreate_s8 (UINT64_C (0x0)));          \
+       __asm__ ("shrn2 %0.16b,%1.8h,#%2"                                \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vshrn_high_n_s32(a, b, c)                                       \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t b_ = (b);                                              \
+       int16x4_t a_ = (a);                                              \
+       int16x8_t result = vcombine_s16                                  \
+                            (a_, vcreate_s16 (UINT64_C (0x0)));         \
+       __asm__ ("shrn2 %0.8h,%1.4s,#%2"                                 \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vshrn_high_n_s64(a, b, c)                                       \
+  __extension__                                                         \
+    ({                                                                  \
+       int64x2_t b_ = (b);                                              \
+       int32x2_t a_ = (a);                                              \
+       int32x4_t result = vcombine_s32                                  \
+                            (a_, vcreate_s32 (UINT64_C (0x0)));         \
+       __asm__ ("shrn2 %0.4s,%1.2d,#%2"                                 \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vshrn_high_n_u16(a, b, c)                                       \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t b_ = (b);                                             \
+       uint8x8_t a_ = (a);                                              \
+       uint8x16_t result = vcombine_u8                                  \
+                            (a_, vcreate_u8 (UINT64_C (0x0)));          \
+       __asm__ ("shrn2 %0.16b,%1.8h,#%2"                                \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vshrn_high_n_u32(a, b, c)                                       \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t b_ = (b);                                             \
+       uint16x4_t a_ = (a);                                             \
+       uint16x8_t result = vcombine_u16                                 \
+                            (a_, vcreate_u16 (UINT64_C (0x0)));         \
+       __asm__ ("shrn2 %0.8h,%1.4s,#%2"                                 \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vshrn_high_n_u64(a, b, c)                                       \
+  __extension__                                                         \
+    ({                                                                  \
+       uint64x2_t b_ = (b);                                             \
+       uint32x2_t a_ = (a);                                             \
+       uint32x4_t result = vcombine_u32                                 \
+                            (a_, vcreate_u32 (UINT64_C (0x0)));         \
+       __asm__ ("shrn2 %0.4s,%1.2d,#%2"                                 \
+                : "+w"(result)                                          \
+                : "w"(b_), "i"(c)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
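+/* vshrn_n: shift each element right by the immediate and narrow to half
+   the element width, producing a 64-bit result (SHRN).  */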
+#define vshrn_n_s16(a, b)                                               \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t a_ = (a);                                              \
+       int8x8_t result;                                                 \
+       __asm__ ("shrn %0.8b,%1.8h,%2"                                   \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vshrn_n_s32(a, b)                                               \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t a_ = (a);                                              \
+       int16x4_t result;                                                \
+       __asm__ ("shrn %0.4h,%1.4s,%2"                                   \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vshrn_n_s64(a, b)                                               \
+  __extension__                                                         \
+    ({                                                                  \
+       int64x2_t a_ = (a);                                              \
+       int32x2_t result;                                                \
+       __asm__ ("shrn %0.2s,%1.2d,%2"                                   \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vshrn_n_u16(a, b)                                               \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t a_ = (a);                                             \
+       uint8x8_t result;                                                \
+       __asm__ ("shrn %0.8b,%1.8h,%2"                                   \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vshrn_n_u32(a, b)                                               \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t a_ = (a);                                             \
+       uint16x4_t result;                                               \
+       __asm__ ("shrn %0.4h,%1.4s,%2"                                   \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vshrn_n_u64(a, b)                                               \
+  __extension__                                                         \
+    ({                                                                  \
+       uint64x2_t a_ = (a);                                             \
+       uint32x2_t result;                                               \
+       __asm__ ("shrn %0.2s,%1.2d,%2"                                   \
+                : "=w"(result)                                          \
+                : "w"(a_), "i"(b)                                       \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
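+/* vsli_n: shift each element of the second operand left by the immediate
+   and insert it into the first operand, preserving the destination bits
+   below the shift amount (SLI).  */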
+#define vsli_n_p8(a, b, c)                                              \
+  __extension__                                                         \
+    ({                                                                  \
+       poly8x8_t b_ = (b);                                              \
+       poly8x8_t a_ = (a);                                              \
+       poly8x8_t result;                                                \
+       __asm__ ("sli %0.8b,%2.8b,%3"                                    \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vsli_n_p16(a, b, c)                                             \
+  __extension__                                                         \
+    ({                                                                  \
+       poly16x4_t b_ = (b);                                             \
+       poly16x4_t a_ = (a);                                             \
+       poly16x4_t result;                                               \
+       __asm__ ("sli %0.4h,%2.4h,%3"                                    \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vsliq_n_p8(a, b, c)                                             \
+  __extension__                                                         \
+    ({                                                                  \
+       poly8x16_t b_ = (b);                                             \
+       poly8x16_t a_ = (a);                                             \
+       poly8x16_t result;                                               \
+       __asm__ ("sli %0.16b,%2.16b,%3"                                  \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vsliq_n_p16(a, b, c)                                            \
+  __extension__                                                         \
+    ({                                                                  \
+       poly16x8_t b_ = (b);                                             \
+       poly16x8_t a_ = (a);                                             \
+       poly16x8_t result;                                               \
+       __asm__ ("sli %0.8h,%2.8h,%3"                                    \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
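+/* vsri_n: shift each element of the second operand right by the immediate
+   and insert it into the first operand, preserving the upper bits of the
+   destination element (SRI).  */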
+#define vsri_n_p8(a, b, c)                                              \
+  __extension__                                                         \
+    ({                                                                  \
+       poly8x8_t b_ = (b);                                              \
+       poly8x8_t a_ = (a);                                              \
+       poly8x8_t result;                                                \
+       __asm__ ("sri %0.8b,%2.8b,%3"                                    \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vsri_n_p16(a, b, c)                                             \
+  __extension__                                                         \
+    ({                                                                  \
+       poly16x4_t b_ = (b);                                             \
+       poly16x4_t a_ = (a);                                             \
+       poly16x4_t result;                                               \
+       __asm__ ("sri %0.4h,%2.4h,%3"                                    \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vsriq_n_p8(a, b, c)                                             \
+  __extension__                                                         \
+    ({                                                                  \
+       poly8x16_t b_ = (b);                                             \
+       poly8x16_t a_ = (a);                                             \
+       poly8x16_t result;                                               \
+       __asm__ ("sri %0.16b,%2.16b,%3"                                  \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
+#define vsriq_n_p16(a, b, c)                                            \
+  __extension__                                                         \
+    ({                                                                  \
+       poly16x8_t b_ = (b);                                             \
+       poly16x8_t a_ = (a);                                             \
+       poly16x8_t result;                                               \
+       __asm__ ("sri %0.8h,%2.8h,%3"                                    \
+                : "=w"(result)                                          \
+                : "0"(a_), "w"(b_), "i"(c)                              \
+                : /* No clobbers */);                                   \
+       result;                                                          \
+     })
+
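+/* vst1/vst1_lane: store a whole 64-bit vector, or a single lane of it,
+   to the address given by the first argument.  */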
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_f32 (float32_t * a, float32x2_t b)
+{
+  __asm__ ("st1 {%1.2s},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_f64 (float64_t * a, float64x1_t b)
+{
+  __asm__ ("st1 {%1.1d},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+#define vst1_lane_f32(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x2_t b_ = (b);                                            \
+       float32_t * a_ = (a);                                            \
+       __asm__ ("st1 {%1.s}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1_lane_f64(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x1_t b_ = (b);                                            \
+       float64_t * a_ = (a);                                            \
+       __asm__ ("st1 {%1.d}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1_lane_p8(a, b, c)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       poly8x8_t b_ = (b);                                              \
+       poly8_t * a_ = (a);                                              \
+       __asm__ ("st1 {%1.b}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1_lane_p16(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       poly16x4_t b_ = (b);                                             \
+       poly16_t * a_ = (a);                                             \
+       __asm__ ("st1 {%1.h}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1_lane_s8(a, b, c)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       int8x8_t b_ = (b);                                               \
+       int8_t * a_ = (a);                                               \
+       __asm__ ("st1 {%1.b}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1_lane_s16(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x4_t b_ = (b);                                              \
+       int16_t * a_ = (a);                                              \
+       __asm__ ("st1 {%1.h}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1_lane_s32(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x2_t b_ = (b);                                              \
+       int32_t * a_ = (a);                                              \
+       __asm__ ("st1 {%1.s}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1_lane_s64(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       int64x1_t b_ = (b);                                              \
+       int64_t * a_ = (a);                                              \
+       __asm__ ("st1 {%1.d}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1_lane_u8(a, b, c)                                           \
+  __extension__                                                         \
+    ({                                                                  \
+       uint8x8_t b_ = (b);                                              \
+       uint8_t * a_ = (a);                                              \
+       __asm__ ("st1 {%1.b}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1_lane_u16(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x4_t b_ = (b);                                             \
+       uint16_t * a_ = (a);                                             \
+       __asm__ ("st1 {%1.h}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1_lane_u32(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x2_t b_ = (b);                                             \
+       uint32_t * a_ = (a);                                             \
+       __asm__ ("st1 {%1.s}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1_lane_u64(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       uint64x1_t b_ = (b);                                             \
+       uint64_t * a_ = (a);                                             \
+       __asm__ ("st1 {%1.d}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_p8 (poly8_t * a, poly8x8_t b)
+{
+  __asm__ ("st1 {%1.8b},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_p16 (poly16_t * a, poly16x4_t b)
+{
+  __asm__ ("st1 {%1.4h},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s8 (int8_t * a, int8x8_t b)
+{
+  __asm__ ("st1 {%1.8b},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s16 (int16_t * a, int16x4_t b)
+{
+  __asm__ ("st1 {%1.4h},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s32 (int32_t * a, int32x2_t b)
+{
+  __asm__ ("st1 {%1.2s},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s64 (int64_t * a, int64x1_t b)
+{
+  __asm__ ("st1 {%1.1d},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u8 (uint8_t * a, uint8x8_t b)
+{
+  __asm__ ("st1 {%1.8b},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u16 (uint16_t * a, uint16x4_t b)
+{
+  __asm__ ("st1 {%1.4h},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u32 (uint32_t * a, uint32x2_t b)
+{
+  __asm__ ("st1 {%1.2s},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u64 (uint64_t * a, uint64x1_t b)
+{
+  __asm__ ("st1 {%1.1d},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
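+/* vst1q/vst1q_lane: 128-bit variants of the stores above.  */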
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_f32 (float32_t * a, float32x4_t b)
+{
+  __asm__ ("st1 {%1.4s},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_f64 (float64_t * a, float64x2_t b)
+{
+  __asm__ ("st1 {%1.2d},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+#define vst1q_lane_f32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       float32x4_t b_ = (b);                                            \
+       float32_t * a_ = (a);                                            \
+       __asm__ ("st1 {%1.s}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1q_lane_f64(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       float64x2_t b_ = (b);                                            \
+       float64_t * a_ = (a);                                            \
+       __asm__ ("st1 {%1.d}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1q_lane_p8(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       poly8x16_t b_ = (b);                                             \
+       poly8_t * a_ = (a);                                              \
+       __asm__ ("st1 {%1.b}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1q_lane_p16(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       poly16x8_t b_ = (b);                                             \
+       poly16_t * a_ = (a);                                             \
+       __asm__ ("st1 {%1.h}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1q_lane_s8(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       int8x16_t b_ = (b);                                              \
+       int8_t * a_ = (a);                                               \
+       __asm__ ("st1 {%1.b}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1q_lane_s16(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       int16x8_t b_ = (b);                                              \
+       int16_t * a_ = (a);                                              \
+       __asm__ ("st1 {%1.h}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1q_lane_s32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       int32x4_t b_ = (b);                                              \
+       int32_t * a_ = (a);                                              \
+       __asm__ ("st1 {%1.s}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1q_lane_s64(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       int64x2_t b_ = (b);                                              \
+       int64_t * a_ = (a);                                              \
+       __asm__ ("st1 {%1.d}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1q_lane_u8(a, b, c)                                          \
+  __extension__                                                         \
+    ({                                                                  \
+       uint8x16_t b_ = (b);                                             \
+       uint8_t * a_ = (a);                                              \
+       __asm__ ("st1 {%1.b}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1q_lane_u16(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       uint16x8_t b_ = (b);                                             \
+       uint16_t * a_ = (a);                                             \
+       __asm__ ("st1 {%1.h}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1q_lane_u32(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       uint32x4_t b_ = (b);                                             \
+       uint32_t * a_ = (a);                                             \
+       __asm__ ("st1 {%1.s}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+#define vst1q_lane_u64(a, b, c)                                         \
+  __extension__                                                         \
+    ({                                                                  \
+       uint64x2_t b_ = (b);                                             \
+       uint64_t * a_ = (a);                                             \
+       __asm__ ("st1 {%1.d}[%2],[%0]"                                   \
+                :                                                       \
+                : "r"(a_), "w"(b_), "i"(c)                              \
+                : "memory");                                            \
+     })
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_p8 (poly8_t * a, poly8x16_t b)
+{
+  __asm__ ("st1 {%1.16b},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_p16 (poly16_t * a, poly16x8_t b)
+{
+  __asm__ ("st1 {%1.8h},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s8 (int8_t * a, int8x16_t b)
+{
+  __asm__ ("st1 {%1.16b},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s16 (int16_t * a, int16x8_t b)
+{
+  __asm__ ("st1 {%1.8h},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s32 (int32_t * a, int32x4_t b)
+{
+  __asm__ ("st1 {%1.4s},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s64 (int64_t * a, int64x2_t b)
+{
+  __asm__ ("st1 {%1.2d},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u8 (uint8_t * a, uint8x16_t b)
+{
+  __asm__ ("st1 {%1.16b},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u16 (uint16_t * a, uint16x8_t b)
+{
+  __asm__ ("st1 {%1.8h},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u32 (uint32_t * a, uint32x4_t b)
+{
+  __asm__ ("st1 {%1.4s},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u64 (uint64_t * a, uint64x2_t b)
+{
+  __asm__ ("st1 {%1.2d},[%0]"
+           :
+           : "r"(a), "w"(b)
+           : "memory");
+}
+
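+/* vsubhn_high: subtract the third operand from the second, keep the high
+   half of each result, and place the narrowed values in the high half of
+   the destination; the low half comes from the first argument (SUBHN2).  */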
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsubhn_high_s16 (int8x8_t a, int16x8_t b, int16x8_t c)
+{
+  int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
+  __asm__ ("subhn2 %0.16b, %1.8h, %2.8h"
+           : "+w"(result)
+           : "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubhn_high_s32 (int16x4_t a, int32x4_t b, int32x4_t c)
+{
+  int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
+  __asm__ ("subhn2 %0.8h, %1.4s, %2.4s"
+           : "+w"(result)
+           : "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubhn_high_s64 (int32x2_t a, int64x2_t b, int64x2_t c)
+{
+  int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
+  __asm__ ("subhn2 %0.4s, %1.2d, %2.2d"
+           : "+w"(result)
+           : "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsubhn_high_u16 (uint8x8_t a, uint16x8_t b, uint16x8_t c)
+{
+  uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
+  __asm__ ("subhn2 %0.16b, %1.8h, %2.8h"
+           : "+w"(result)
+           : "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubhn_high_u32 (uint16x4_t a, uint32x4_t b, uint32x4_t c)
+{
+  uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
+  __asm__ ("subhn2 %0.8h, %1.4s, %2.4s"
+           : "+w"(result)
+           : "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubhn_high_u64 (uint32x2_t a, uint64x2_t b, uint64x2_t c)
+{
+  uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
+  __asm__ ("subhn2 %0.4s, %1.2d, %2.2d"
+           : "+w"(result)
+           : "w"(b), "w"(c)
+           : /* No clobbers */);
+  return result;
+}
+
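+/* vsubhn: subtract and keep the high half of each element, narrowing to
+   a 64-bit result (SUBHN).  */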
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsubhn_s16 (int16x8_t a, int16x8_t b)
+{
+  int8x8_t result;
+  __asm__ ("subhn %0.8b, %1.8h, %2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsubhn_s32 (int32x4_t a, int32x4_t b)
+{
+  int16x4_t result;
+  __asm__ ("subhn %0.4h, %1.4s, %2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsubhn_s64 (int64x2_t a, int64x2_t b)
+{
+  int32x2_t result;
+  __asm__ ("subhn %0.2s, %1.2d, %2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsubhn_u16 (uint16x8_t a, uint16x8_t b)
+{
+  uint8x8_t result;
+  __asm__ ("subhn %0.8b, %1.8h, %2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsubhn_u32 (uint32x4_t a, uint32x4_t b)
+{
+  uint16x4_t result;
+  __asm__ ("subhn %0.4h, %1.4s, %2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsubhn_u64 (uint64x2_t a, uint64x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("subhn %0.2s, %1.2d, %2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
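+/* vtrn1: interleave the even-numbered elements of the two operands
+   (TRN1).  */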
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vtrn1_f32 (float32x2_t a, float32x2_t b)
+{
+  float32x2_t result;
+  __asm__ ("trn1 %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtrn1_p8 (poly8x8_t a, poly8x8_t b)
+{
+  poly8x8_t result;
+  __asm__ ("trn1 %0.8b,%1.8b,%2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vtrn1_p16 (poly16x4_t a, poly16x4_t b)
+{
+  poly16x4_t result;
+  __asm__ ("trn1 %0.4h,%1.4h,%2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtrn1_s8 (int8x8_t a, int8x8_t b)
+{
+  int8x8_t result;
+  __asm__ ("trn1 %0.8b,%1.8b,%2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vtrn1_s16 (int16x4_t a, int16x4_t b)
+{
+  int16x4_t result;
+  __asm__ ("trn1 %0.4h,%1.4h,%2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vtrn1_s32 (int32x2_t a, int32x2_t b)
+{
+  int32x2_t result;
+  __asm__ ("trn1 %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtrn1_u8 (uint8x8_t a, uint8x8_t b)
+{
+  uint8x8_t result;
+  __asm__ ("trn1 %0.8b,%1.8b,%2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtrn1_u16 (uint16x4_t a, uint16x4_t b)
+{
+  uint16x4_t result;
+  __asm__ ("trn1 %0.4h,%1.4h,%2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vtrn1_u32 (uint32x2_t a, uint32x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("trn1 %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vtrn1q_f32 (float32x4_t a, float32x4_t b)
+{
+  float32x4_t result;
+  __asm__ ("trn1 %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vtrn1q_f64 (float64x2_t a, float64x2_t b)
+{
+  float64x2_t result;
+  __asm__ ("trn1 %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vtrn1q_p8 (poly8x16_t a, poly8x16_t b)
+{
+  poly8x16_t result;
+  __asm__ ("trn1 %0.16b,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vtrn1q_p16 (poly16x8_t a, poly16x8_t b)
+{
+  poly16x8_t result;
+  __asm__ ("trn1 %0.8h,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vtrn1q_s8 (int8x16_t a, int8x16_t b)
+{
+  int8x16_t result;
+  __asm__ ("trn1 %0.16b,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vtrn1q_s16 (int16x8_t a, int16x8_t b)
+{
+  int16x8_t result;
+  __asm__ ("trn1 %0.8h,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vtrn1q_s32 (int32x4_t a, int32x4_t b)
+{
+  int32x4_t result;
+  __asm__ ("trn1 %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vtrn1q_s64 (int64x2_t a, int64x2_t b)
+{
+  int64x2_t result;
+  __asm__ ("trn1 %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtrn1q_u8 (uint8x16_t a, uint8x16_t b)
+{
+  uint8x16_t result;
+  __asm__ ("trn1 %0.16b,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtrn1q_u16 (uint16x8_t a, uint16x8_t b)
+{
+  uint16x8_t result;
+  __asm__ ("trn1 %0.8h,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vtrn1q_u32 (uint32x4_t a, uint32x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("trn1 %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vtrn1q_u64 (uint64x2_t a, uint64x2_t b)
+{
+  uint64x2_t result;
+  __asm__ ("trn1 %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
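+/* vtrn2: interleave the odd-numbered elements of the two operands
+   (TRN2).  */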
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vtrn2_f32 (float32x2_t a, float32x2_t b)
+{
+  float32x2_t result;
+  __asm__ ("trn2 %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtrn2_p8 (poly8x8_t a, poly8x8_t b)
+{
+  poly8x8_t result;
+  __asm__ ("trn2 %0.8b,%1.8b,%2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vtrn2_p16 (poly16x4_t a, poly16x4_t b)
+{
+  poly16x4_t result;
+  __asm__ ("trn2 %0.4h,%1.4h,%2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtrn2_s8 (int8x8_t a, int8x8_t b)
+{
+  int8x8_t result;
+  __asm__ ("trn2 %0.8b,%1.8b,%2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vtrn2_s16 (int16x4_t a, int16x4_t b)
+{
+  int16x4_t result;
+  __asm__ ("trn2 %0.4h,%1.4h,%2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vtrn2_s32 (int32x2_t a, int32x2_t b)
+{
+  int32x2_t result;
+  __asm__ ("trn2 %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtrn2_u8 (uint8x8_t a, uint8x8_t b)
+{
+  uint8x8_t result;
+  __asm__ ("trn2 %0.8b,%1.8b,%2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtrn2_u16 (uint16x4_t a, uint16x4_t b)
+{
+  uint16x4_t result;
+  __asm__ ("trn2 %0.4h,%1.4h,%2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vtrn2_u32 (uint32x2_t a, uint32x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("trn2 %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vtrn2q_f32 (float32x4_t a, float32x4_t b)
+{
+  float32x4_t result;
+  __asm__ ("trn2 %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vtrn2q_f64 (float64x2_t a, float64x2_t b)
+{
+  float64x2_t result;
+  __asm__ ("trn2 %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vtrn2q_p8 (poly8x16_t a, poly8x16_t b)
+{
+  poly8x16_t result;
+  __asm__ ("trn2 %0.16b,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vtrn2q_p16 (poly16x8_t a, poly16x8_t b)
+{
+  poly16x8_t result;
+  __asm__ ("trn2 %0.8h,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vtrn2q_s8 (int8x16_t a, int8x16_t b)
+{
+  int8x16_t result;
+  __asm__ ("trn2 %0.16b,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vtrn2q_s16 (int16x8_t a, int16x8_t b)
+{
+  int16x8_t result;
+  __asm__ ("trn2 %0.8h,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vtrn2q_s32 (int32x4_t a, int32x4_t b)
+{
+  int32x4_t result;
+  __asm__ ("trn2 %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vtrn2q_s64 (int64x2_t a, int64x2_t b)
+{
+  int64x2_t result;
+  __asm__ ("trn2 %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtrn2q_u8 (uint8x16_t a, uint8x16_t b)
+{
+  uint8x16_t result;
+  __asm__ ("trn2 %0.16b,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtrn2q_u16 (uint16x8_t a, uint16x8_t b)
+{
+  uint16x8_t result;
+  __asm__ ("trn2 %0.8h,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vtrn2q_u32 (uint32x4_t a, uint32x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("trn2 %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vtrn2q_u64 (uint64x2_t a, uint64x2_t b)
+{
+  uint64x2_t result;
+  __asm__ ("trn2 %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtst_p8 (poly8x8_t a, poly8x8_t b)
+{
+  uint8x8_t result;
+  __asm__ ("cmtst %0.8b, %1.8b, %2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtst_p16 (poly16x4_t a, poly16x4_t b)
+{
+  uint16x4_t result;
+  __asm__ ("cmtst %0.4h, %1.4h, %2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtstq_p8 (poly8x16_t a, poly8x16_t b)
+{
+  uint8x16_t result;
+  __asm__ ("cmtst %0.16b, %1.16b, %2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtstq_p16 (poly16x8_t a, poly16x8_t b)
+{
+  uint16x8_t result;
+  __asm__ ("cmtst %0.8h, %1.8h, %2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vuzp1_f32 (float32x2_t a, float32x2_t b)
+{
+  float32x2_t result;
+  __asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vuzp1_p8 (poly8x8_t a, poly8x8_t b)
+{
+  poly8x8_t result;
+  __asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vuzp1_p16 (poly16x4_t a, poly16x4_t b)
+{
+  poly16x4_t result;
+  __asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vuzp1_s8 (int8x8_t a, int8x8_t b)
+{
+  int8x8_t result;
+  __asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vuzp1_s16 (int16x4_t a, int16x4_t b)
+{
+  int16x4_t result;
+  __asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vuzp1_s32 (int32x2_t a, int32x2_t b)
+{
+  int32x2_t result;
+  __asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vuzp1_u8 (uint8x8_t a, uint8x8_t b)
+{
+  uint8x8_t result;
+  __asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vuzp1_u16 (uint16x4_t a, uint16x4_t b)
+{
+  uint16x4_t result;
+  __asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vuzp1_u32 (uint32x2_t a, uint32x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vuzp1q_f32 (float32x4_t a, float32x4_t b)
+{
+  float32x4_t result;
+  __asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vuzp1q_f64 (float64x2_t a, float64x2_t b)
+{
+  float64x2_t result;
+  __asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vuzp1q_p8 (poly8x16_t a, poly8x16_t b)
+{
+  poly8x16_t result;
+  __asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vuzp1q_p16 (poly16x8_t a, poly16x8_t b)
+{
+  poly16x8_t result;
+  __asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vuzp1q_s8 (int8x16_t a, int8x16_t b)
+{
+  int8x16_t result;
+  __asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vuzp1q_s16 (int16x8_t a, int16x8_t b)
+{
+  int16x8_t result;
+  __asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vuzp1q_s32 (int32x4_t a, int32x4_t b)
+{
+  int32x4_t result;
+  __asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vuzp1q_s64 (int64x2_t a, int64x2_t b)
+{
+  int64x2_t result;
+  __asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vuzp1q_u8 (uint8x16_t a, uint8x16_t b)
+{
+  uint8x16_t result;
+  __asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vuzp1q_u16 (uint16x8_t a, uint16x8_t b)
+{
+  uint16x8_t result;
+  __asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vuzp1q_u32 (uint32x4_t a, uint32x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vuzp1q_u64 (uint64x2_t a, uint64x2_t b)
+{
+  uint64x2_t result;
+  __asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vuzp2_f32 (float32x2_t a, float32x2_t b)
+{
+  float32x2_t result;
+  __asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vuzp2_p8 (poly8x8_t a, poly8x8_t b)
+{
+  poly8x8_t result;
+  __asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vuzp2_p16 (poly16x4_t a, poly16x4_t b)
+{
+  poly16x4_t result;
+  __asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vuzp2_s8 (int8x8_t a, int8x8_t b)
+{
+  int8x8_t result;
+  __asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vuzp2_s16 (int16x4_t a, int16x4_t b)
+{
+  int16x4_t result;
+  __asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vuzp2_s32 (int32x2_t a, int32x2_t b)
+{
+  int32x2_t result;
+  __asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vuzp2_u8 (uint8x8_t a, uint8x8_t b)
+{
+  uint8x8_t result;
+  __asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vuzp2_u16 (uint16x4_t a, uint16x4_t b)
+{
+  uint16x4_t result;
+  __asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vuzp2_u32 (uint32x2_t a, uint32x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vuzp2q_f32 (float32x4_t a, float32x4_t b)
+{
+  float32x4_t result;
+  __asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vuzp2q_f64 (float64x2_t a, float64x2_t b)
+{
+  float64x2_t result;
+  __asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vuzp2q_p8 (poly8x16_t a, poly8x16_t b)
+{
+  poly8x16_t result;
+  __asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vuzp2q_p16 (poly16x8_t a, poly16x8_t b)
+{
+  poly16x8_t result;
+  __asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vuzp2q_s8 (int8x16_t a, int8x16_t b)
+{
+  int8x16_t result;
+  __asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vuzp2q_s16 (int16x8_t a, int16x8_t b)
+{
+  int16x8_t result;
+  __asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vuzp2q_s32 (int32x4_t a, int32x4_t b)
+{
+  int32x4_t result;
+  __asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vuzp2q_s64 (int64x2_t a, int64x2_t b)
+{
+  int64x2_t result;
+  __asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vuzp2q_u8 (uint8x16_t a, uint8x16_t b)
+{
+  uint8x16_t result;
+  __asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vuzp2q_u16 (uint16x8_t a, uint16x8_t b)
+{
+  uint16x8_t result;
+  __asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vuzp2q_u32 (uint32x4_t a, uint32x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vuzp2q_u64 (uint64x2_t a, uint64x2_t b)
+{
+  uint64x2_t result;
+  __asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vzip1_f32 (float32x2_t a, float32x2_t b)
+{
+  float32x2_t result;
+  __asm__ ("zip1 %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vzip1_p8 (poly8x8_t a, poly8x8_t b)
+{
+  poly8x8_t result;
+  __asm__ ("zip1 %0.8b,%1.8b,%2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vzip1_p16 (poly16x4_t a, poly16x4_t b)
+{
+  poly16x4_t result;
+  __asm__ ("zip1 %0.4h,%1.4h,%2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vzip1_s8 (int8x8_t a, int8x8_t b)
+{
+  int8x8_t result;
+  __asm__ ("zip1 %0.8b,%1.8b,%2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vzip1_s16 (int16x4_t a, int16x4_t b)
+{
+  int16x4_t result;
+  __asm__ ("zip1 %0.4h,%1.4h,%2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vzip1_s32 (int32x2_t a, int32x2_t b)
+{
+  int32x2_t result;
+  __asm__ ("zip1 %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vzip1_u8 (uint8x8_t a, uint8x8_t b)
+{
+  uint8x8_t result;
+  __asm__ ("zip1 %0.8b,%1.8b,%2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vzip1_u16 (uint16x4_t a, uint16x4_t b)
+{
+  uint16x4_t result;
+  __asm__ ("zip1 %0.4h,%1.4h,%2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vzip1_u32 (uint32x2_t a, uint32x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("zip1 %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vzip1q_f32 (float32x4_t a, float32x4_t b)
+{
+  float32x4_t result;
+  __asm__ ("zip1 %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vzip1q_f64 (float64x2_t a, float64x2_t b)
+{
+  float64x2_t result;
+  __asm__ ("zip1 %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vzip1q_p8 (poly8x16_t a, poly8x16_t b)
+{
+  poly8x16_t result;
+  __asm__ ("zip1 %0.16b,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vzip1q_p16 (poly16x8_t a, poly16x8_t b)
+{
+  poly16x8_t result;
+  __asm__ ("zip1 %0.8h,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vzip1q_s8 (int8x16_t a, int8x16_t b)
+{
+  int8x16_t result;
+  __asm__ ("zip1 %0.16b,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vzip1q_s16 (int16x8_t a, int16x8_t b)
+{
+  int16x8_t result;
+  __asm__ ("zip1 %0.8h,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vzip1q_s32 (int32x4_t a, int32x4_t b)
+{
+  int32x4_t result;
+  __asm__ ("zip1 %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vzip1q_s64 (int64x2_t a, int64x2_t b)
+{
+  int64x2_t result;
+  __asm__ ("zip1 %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vzip1q_u8 (uint8x16_t a, uint8x16_t b)
+{
+  uint8x16_t result;
+  __asm__ ("zip1 %0.16b,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vzip1q_u16 (uint16x8_t a, uint16x8_t b)
+{
+  uint16x8_t result;
+  __asm__ ("zip1 %0.8h,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vzip1q_u32 (uint32x4_t a, uint32x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("zip1 %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vzip1q_u64 (uint64x2_t a, uint64x2_t b)
+{
+  uint64x2_t result;
+  __asm__ ("zip1 %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vzip2_f32 (float32x2_t a, float32x2_t b)
+{
+  float32x2_t result;
+  __asm__ ("zip2 %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vzip2_p8 (poly8x8_t a, poly8x8_t b)
+{
+  poly8x8_t result;
+  __asm__ ("zip2 %0.8b,%1.8b,%2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vzip2_p16 (poly16x4_t a, poly16x4_t b)
+{
+  poly16x4_t result;
+  __asm__ ("zip2 %0.4h,%1.4h,%2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vzip2_s8 (int8x8_t a, int8x8_t b)
+{
+  int8x8_t result;
+  __asm__ ("zip2 %0.8b,%1.8b,%2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vzip2_s16 (int16x4_t a, int16x4_t b)
+{
+  int16x4_t result;
+  __asm__ ("zip2 %0.4h,%1.4h,%2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vzip2_s32 (int32x2_t a, int32x2_t b)
+{
+  int32x2_t result;
+  __asm__ ("zip2 %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vzip2_u8 (uint8x8_t a, uint8x8_t b)
+{
+  uint8x8_t result;
+  __asm__ ("zip2 %0.8b,%1.8b,%2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vzip2_u16 (uint16x4_t a, uint16x4_t b)
+{
+  uint16x4_t result;
+  __asm__ ("zip2 %0.4h,%1.4h,%2.4h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vzip2_u32 (uint32x2_t a, uint32x2_t b)
+{
+  uint32x2_t result;
+  __asm__ ("zip2 %0.2s,%1.2s,%2.2s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vzip2q_f32 (float32x4_t a, float32x4_t b)
+{
+  float32x4_t result;
+  __asm__ ("zip2 %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vzip2q_f64 (float64x2_t a, float64x2_t b)
+{
+  float64x2_t result;
+  __asm__ ("zip2 %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vzip2q_p8 (poly8x16_t a, poly8x16_t b)
+{
+  poly8x16_t result;
+  __asm__ ("zip2 %0.16b,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vzip2q_p16 (poly16x8_t a, poly16x8_t b)
+{
+  poly16x8_t result;
+  __asm__ ("zip2 %0.8h,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vzip2q_s8 (int8x16_t a, int8x16_t b)
+{
+  int8x16_t result;
+  __asm__ ("zip2 %0.16b,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vzip2q_s16 (int16x8_t a, int16x8_t b)
+{
+  int16x8_t result;
+  __asm__ ("zip2 %0.8h,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vzip2q_s32 (int32x4_t a, int32x4_t b)
+{
+  int32x4_t result;
+  __asm__ ("zip2 %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vzip2q_s64 (int64x2_t a, int64x2_t b)
+{
+  int64x2_t result;
+  __asm__ ("zip2 %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vzip2q_u8 (uint8x16_t a, uint8x16_t b)
+{
+  uint8x16_t result;
+  __asm__ ("zip2 %0.16b,%1.16b,%2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vzip2q_u16 (uint16x8_t a, uint16x8_t b)
+{
+  uint16x8_t result;
+  __asm__ ("zip2 %0.8h,%1.8h,%2.8h"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vzip2q_u32 (uint32x4_t a, uint32x4_t b)
+{
+  uint32x4_t result;
+  __asm__ ("zip2 %0.4s,%1.4s,%2.4s"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vzip2q_u64 (uint64x2_t a, uint64x2_t b)
+{
+  uint64x2_t result;
+  __asm__ ("zip2 %0.2d,%1.2d,%2.2d"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+/* End of temporary inline asm implementations.  */
+
+/* Start of temporary inline asm for vldn, vstn and friends.  */
+
+/* Create struct element types for duplicating loads.
+
+   Create 2 element structures of:
+
+   +------+----+----+----+----+
+   |      | 8  | 16 | 32 | 64 |
+   +------+----+----+----+----+
+   |int   | Y  | Y  | N  | N  |
+   +------+----+----+----+----+
+   |uint  | Y  | Y  | N  | N  |
+   +------+----+----+----+----+
+   |float | -  | -  | N  | N  |
+   +------+----+----+----+----+
+   |poly  | Y  | Y  | -  | -  |
+   +------+----+----+----+----+
+
+   Create 3 element structures of:
+
+   +------+----+----+----+----+
+   |      | 8  | 16 | 32 | 64 |
+   +------+----+----+----+----+
+   |int   | Y  | Y  | Y  | Y  |
+   +------+----+----+----+----+
+   |uint  | Y  | Y  | Y  | Y  |
+   +------+----+----+----+----+
+   |float | -  | -  | Y  | Y  |
+   +------+----+----+----+----+
+   |poly  | Y  | Y  | -  | -  |
+   +------+----+----+----+----+
+
+   Create 4 element structures of:
+
+   +------+----+----+----+----+
+   |      | 8  | 16 | 32 | 64 |
+   +------+----+----+----+----+
+   |int   | Y  | N  | N  | Y  |
+   +------+----+----+----+----+
+   |uint  | Y  | N  | N  | Y  |
+   +------+----+----+----+----+
+   |float | -  | -  | N  | Y  |
+   +------+----+----+----+----+
+   |poly  | Y  | N  | -  | -  |
+   +------+----+----+----+----+
+
+   This is required for casting the memory reference; an example expansion
+   is shown after the struct definitions below.  */
+#define __STRUCTN(t, sz, nelem)                        \
+  typedef struct t ## sz ## x ## nelem ## _t { \
+    t ## sz ## _t val[nelem];                  \
+  }  t ## sz ## x ## nelem ## _t;
+
+/* 2-element structs.  */
+__STRUCTN (int, 8, 2)
+__STRUCTN (int, 16, 2)
+__STRUCTN (uint, 8, 2)
+__STRUCTN (uint, 16, 2)
+__STRUCTN (poly, 8, 2)
+__STRUCTN (poly, 16, 2)
+/* 3-element structs.  */
+__STRUCTN (int, 8, 3)
+__STRUCTN (int, 16, 3)
+__STRUCTN (int, 32, 3)
+__STRUCTN (int, 64, 3)
+__STRUCTN (uint, 8, 3)
+__STRUCTN (uint, 16, 3)
+__STRUCTN (uint, 32, 3)
+__STRUCTN (uint, 64, 3)
+__STRUCTN (float, 32, 3)
+__STRUCTN (float, 64, 3)
+__STRUCTN (poly, 8, 3)
+__STRUCTN (poly, 16, 3)
+/* 4-element structs.  */
+__STRUCTN (int, 8, 4)
+__STRUCTN (int, 64, 4)
+__STRUCTN (uint, 8, 4)
+__STRUCTN (uint, 64, 4)
+__STRUCTN (poly, 8, 4)
+__STRUCTN (float, 64, 4)
+#undef __STRUCTN
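+
+/* For illustration, the first invocation above, __STRUCTN (int, 8, 2),
+   expands to:
+
+     typedef struct int8x2_t {
+       int8_t val[2];
+     } int8x2_t;
+
+   i.e. a plain aggregate of NELEM elements that the *_dup_* loads below
+   can cast their memory operands to.  */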
+
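+/* Define vld2{q}_dup_<type> in terms of inline asm: LD2R loads two adjacent
+   elements from *PTR and replicates one across every lane of v16 and the
+   other across every lane of v17; ST1 then copies the fixed register pair
+   into the returned structure.  The same scheme, widened to three and four
+   registers, is used for the LD3R/LD4R wrappers further below.  */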
+#define __LD2R_FUNC(rettype, structtype, ptrtype,                      \
+                   regsuffix, funcsuffix, Q)                           \
+  __extension__ static __inline rettype                                        \
+  __attribute__ ((__always_inline__))                                  \
+  vld2 ## Q ## _dup_ ## funcsuffix (const ptrtype *ptr)                        \
+  {                                                                    \
+    rettype result;                                                    \
+    __asm__ ("ld2r {v16." #regsuffix ", v17." #regsuffix "}, %1\n\t"   \
+            "st1 {v16." #regsuffix ", v17." #regsuffix "}, %0\n\t"     \
+            : "=Q"(result)                                             \
+            : "Q"(*(const structtype *)ptr)                            \
+            : "memory", "v16", "v17");                                 \
+    return result;                                                     \
+  }
+
+__LD2R_FUNC (float32x2x2_t, float32x2_t, float32_t, 2s, f32,)
+__LD2R_FUNC (float64x1x2_t, float64x2_t, float64_t, 1d, f64,)
+__LD2R_FUNC (poly8x8x2_t, poly8x2_t, poly8_t, 8b, p8,)
+__LD2R_FUNC (poly16x4x2_t, poly16x2_t, poly16_t, 4h, p16,)
+__LD2R_FUNC (int8x8x2_t, int8x2_t, int8_t, 8b, s8,)
+__LD2R_FUNC (int16x4x2_t, int16x2_t, int16_t, 4h, s16,)
+__LD2R_FUNC (int32x2x2_t, int32x2_t, int32_t, 2s, s32,)
+__LD2R_FUNC (int64x1x2_t, int64x2_t, int64_t, 1d, s64,)
+__LD2R_FUNC (uint8x8x2_t, uint8x2_t, uint8_t, 8b, u8,)
+__LD2R_FUNC (uint16x4x2_t, uint16x2_t, uint16_t, 4h, u16,)
+__LD2R_FUNC (uint32x2x2_t, uint32x2_t, uint32_t, 2s, u32,)
+__LD2R_FUNC (uint64x1x2_t, uint64x2_t, uint64_t, 1d, u64,)
+__LD2R_FUNC (float32x4x2_t, float32x2_t, float32_t, 4s, f32, q)
+__LD2R_FUNC (float64x2x2_t, float64x2_t, float64_t, 2d, f64, q)
+__LD2R_FUNC (poly8x16x2_t, poly8x2_t, poly8_t, 16b, p8, q)
+__LD2R_FUNC (poly16x8x2_t, poly16x2_t, poly16_t, 8h, p16, q)
+__LD2R_FUNC (int8x16x2_t, int8x2_t, int8_t, 16b, s8, q)
+__LD2R_FUNC (int16x8x2_t, int16x2_t, int16_t, 8h, s16, q)
+__LD2R_FUNC (int32x4x2_t, int32x2_t, int32_t, 4s, s32, q)
+__LD2R_FUNC (int64x2x2_t, int64x2_t, int64_t, 2d, s64, q)
+__LD2R_FUNC (uint8x16x2_t, uint8x2_t, uint8_t, 16b, u8, q)
+__LD2R_FUNC (uint16x8x2_t, uint16x2_t, uint16_t, 8h, u16, q)
+__LD2R_FUNC (uint32x4x2_t, uint32x2_t, uint32_t, 4s, u32, q)
+__LD2R_FUNC (uint64x2x2_t, uint64x2_t, uint64_t, 2d, u64, q)
+
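+/* Define vld2{q}_lane_<type>: the incoming value B is first transferred
+   into the fixed registers v16/v17 with LD1, LD2 then overwrites lane C of
+   both registers from *PTR, and ST1 writes the pair back into the result.  */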
+#define __LD2_LANE_FUNC(rettype, ptrtype, regsuffix,                   \
+                       lnsuffix, funcsuffix, Q)                        \
+  __extension__ static __inline rettype                                        \
+  __attribute__ ((__always_inline__))                                  \
+  vld2 ## Q ## _lane_ ## funcsuffix (const ptrtype *ptr,               \
+                                    rettype b, const int c)            \
+  {                                                                    \
+    rettype result;                                                    \
+    __asm__ ("ld1 {v16." #regsuffix ", v17." #regsuffix "}, %1\n\t"    \
+            "ld2 {v16." #lnsuffix ", v17." #lnsuffix "}[%3], %2\n\t"   \
+            "st1 {v16." #regsuffix ", v17." #regsuffix "}, %0\n\t"     \
+            : "=Q"(result)                                             \
+            : "Q"(b), "Q"(*(const rettype *)ptr), "i"(c)               \
+            : "memory", "v16", "v17");                                 \
+    return result;                                                     \
+  }
+
+__LD2_LANE_FUNC (int8x8x2_t, int8_t, 8b, b, s8,)
+__LD2_LANE_FUNC (float32x2x2_t, float32_t, 2s, s, f32,)
+__LD2_LANE_FUNC (float64x1x2_t, float64_t, 1d, d, f64,)
+__LD2_LANE_FUNC (poly8x8x2_t, poly8_t, 8b, b, p8,)
+__LD2_LANE_FUNC (poly16x4x2_t, poly16_t, 4h, h, p16,)
+__LD2_LANE_FUNC (int16x4x2_t, int16_t, 4h, h, s16,)
+__LD2_LANE_FUNC (int32x2x2_t, int32_t, 2s, s, s32,)
+__LD2_LANE_FUNC (int64x1x2_t, int64_t, 1d, d, s64,)
+__LD2_LANE_FUNC (uint8x8x2_t, uint8_t, 8b, b, u8,)
+__LD2_LANE_FUNC (uint16x4x2_t, uint16_t, 4h, h, u16,)
+__LD2_LANE_FUNC (uint32x2x2_t, uint32_t, 2s, s, u32,)
+__LD2_LANE_FUNC (uint64x1x2_t, uint64_t, 1d, d, u64,)
+__LD2_LANE_FUNC (float32x4x2_t, float32_t, 4s, s, f32, q)
+__LD2_LANE_FUNC (float64x2x2_t, float64_t, 2d, d, f64, q)
+__LD2_LANE_FUNC (poly8x16x2_t, poly8_t, 16b, b, p8, q)
+__LD2_LANE_FUNC (poly16x8x2_t, poly16_t, 8h, h, p16, q)
+__LD2_LANE_FUNC (int8x16x2_t, int8_t, 16b, b, s8, q)
+__LD2_LANE_FUNC (int16x8x2_t, int16_t, 8h, h, s16, q)
+__LD2_LANE_FUNC (int32x4x2_t, int32_t, 4s, s, s32, q)
+__LD2_LANE_FUNC (int64x2x2_t, int64_t, 2d, d, s64, q)
+__LD2_LANE_FUNC (uint8x16x2_t, uint8_t, 16b, b, u8, q)
+__LD2_LANE_FUNC (uint16x8x2_t, uint16_t, 8h, h, u16, q)
+__LD2_LANE_FUNC (uint32x4x2_t, uint32_t, 4s, s, u32, q)
+__LD2_LANE_FUNC (uint64x2x2_t, uint64_t, 2d, d, u64, q)
+
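+/* Three-register (LD3R) analogue of the vld2{q}_dup_<type> wrappers above,
+   using the fixed registers v16-v18.  */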
+#define __LD3R_FUNC(rettype, structtype, ptrtype,                      \
+                   regsuffix, funcsuffix, Q)                           \
+  __extension__ static __inline rettype                                        \
+  __attribute__ ((__always_inline__))                                  \
+  vld3 ## Q ## _dup_ ## funcsuffix (const ptrtype *ptr)                        \
+  {                                                                    \
+    rettype result;                                                    \
+    __asm__ ("ld3r {v16." #regsuffix " - v18." #regsuffix "}, %1\n\t"  \
+            "st1 {v16." #regsuffix " - v18." #regsuffix "}, %0\n\t"    \
+            : "=Q"(result)                                             \
+            : "Q"(*(const structtype *)ptr)                            \
+            : "memory", "v16", "v17", "v18");                          \
+    return result;                                                     \
+  }
+
+__LD3R_FUNC (float32x2x3_t, float32x3_t, float32_t, 2s, f32,)
+__LD3R_FUNC (float64x1x3_t, float64x3_t, float64_t, 1d, f64,)
+__LD3R_FUNC (poly8x8x3_t, poly8x3_t, poly8_t, 8b, p8,)
+__LD3R_FUNC (poly16x4x3_t, poly16x3_t, poly16_t, 4h, p16,)
+__LD3R_FUNC (int8x8x3_t, int8x3_t, int8_t, 8b, s8,)
+__LD3R_FUNC (int16x4x3_t, int16x3_t, int16_t, 4h, s16,)
+__LD3R_FUNC (int32x2x3_t, int32x3_t, int32_t, 2s, s32,)
+__LD3R_FUNC (int64x1x3_t, int64x3_t, int64_t, 1d, s64,)
+__LD3R_FUNC (uint8x8x3_t, uint8x3_t, uint8_t, 8b, u8,)
+__LD3R_FUNC (uint16x4x3_t, uint16x3_t, uint16_t, 4h, u16,)
+__LD3R_FUNC (uint32x2x3_t, uint32x3_t, uint32_t, 2s, u32,)
+__LD3R_FUNC (uint64x1x3_t, uint64x3_t, uint64_t, 1d, u64,)
+__LD3R_FUNC (float32x4x3_t, float32x3_t, float32_t, 4s, f32, q)
+__LD3R_FUNC (float64x2x3_t, float64x3_t, float64_t, 2d, f64, q)
+__LD3R_FUNC (poly8x16x3_t, poly8x3_t, poly8_t, 16b, p8, q)
+__LD3R_FUNC (poly16x8x3_t, poly16x3_t, poly16_t, 8h, p16, q)
+__LD3R_FUNC (int8x16x3_t, int8x3_t, int8_t, 16b, s8, q)
+__LD3R_FUNC (int16x8x3_t, int16x3_t, int16_t, 8h, s16, q)
+__LD3R_FUNC (int32x4x3_t, int32x3_t, int32_t, 4s, s32, q)
+__LD3R_FUNC (int64x2x3_t, int64x3_t, int64_t, 2d, s64, q)
+__LD3R_FUNC (uint8x16x3_t, uint8x3_t, uint8_t, 16b, u8, q)
+__LD3R_FUNC (uint16x8x3_t, uint16x3_t, uint16_t, 8h, u16, q)
+__LD3R_FUNC (uint32x4x3_t, uint32x3_t, uint32_t, 4s, u32, q)
+__LD3R_FUNC (uint64x2x3_t, uint64x3_t, uint64_t, 2d, u64, q)
+
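+/* Three-register analogue of the vld2{q}_lane_<type> wrappers above.  */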
+#define __LD3_LANE_FUNC(rettype, ptrtype, regsuffix,                   \
+                       lnsuffix, funcsuffix, Q)                        \
+  __extension__ static __inline rettype                                        \
+  __attribute__ ((__always_inline__))                                  \
+  vld3 ## Q ## _lane_ ## funcsuffix (const ptrtype *ptr,               \
+                                    rettype b, const int c)            \
+  {                                                                    \
+    rettype result;                                                    \
+    __asm__ ("ld1 {v16." #regsuffix " - v18." #regsuffix "}, %1\n\t"   \
+            "ld3 {v16." #lnsuffix " - v18." #lnsuffix "}[%3], %2\n\t"  \
+            "st1 {v16." #regsuffix " - v18." #regsuffix "}, %0\n\t"    \
+            : "=Q"(result)                                             \
+            : "Q"(b), "Q"(*(const rettype *)ptr), "i"(c)               \
+            : "memory", "v16", "v17", "v18");                          \
+    return result;                                                     \
+  }
+
+__LD3_LANE_FUNC (int8x8x3_t, int8_t, 8b, b, s8,)
+__LD3_LANE_FUNC (float32x2x3_t, float32_t, 2s, s, f32,)
+__LD3_LANE_FUNC (float64x1x3_t, float64_t, 1d, d, f64,)
+__LD3_LANE_FUNC (poly8x8x3_t, poly8_t, 8b, b, p8,)
+__LD3_LANE_FUNC (poly16x4x3_t, poly16_t, 4h, h, p16,)
+__LD3_LANE_FUNC (int16x4x3_t, int16_t, 4h, h, s16,)
+__LD3_LANE_FUNC (int32x2x3_t, int32_t, 2s, s, s32,)
+__LD3_LANE_FUNC (int64x1x3_t, int64_t, 1d, d, s64,)
+__LD3_LANE_FUNC (uint8x8x3_t, uint8_t, 8b, b, u8,)
+__LD3_LANE_FUNC (uint16x4x3_t, uint16_t, 4h, h, u16,)
+__LD3_LANE_FUNC (uint32x2x3_t, uint32_t, 2s, s, u32,)
+__LD3_LANE_FUNC (uint64x1x3_t, uint64_t, 1d, d, u64,)
+__LD3_LANE_FUNC (float32x4x3_t, float32_t, 4s, s, f32, q)
+__LD3_LANE_FUNC (float64x2x3_t, float64_t, 2d, d, f64, q)
+__LD3_LANE_FUNC (poly8x16x3_t, poly8_t, 16b, b, p8, q)
+__LD3_LANE_FUNC (poly16x8x3_t, poly16_t, 8h, h, p16, q)
+__LD3_LANE_FUNC (int8x16x3_t, int8_t, 16b, b, s8, q)
+__LD3_LANE_FUNC (int16x8x3_t, int16_t, 8h, h, s16, q)
+__LD3_LANE_FUNC (int32x4x3_t, int32_t, 4s, s, s32, q)
+__LD3_LANE_FUNC (int64x2x3_t, int64_t, 2d, d, s64, q)
+__LD3_LANE_FUNC (uint8x16x3_t, uint8_t, 16b, b, u8, q)
+__LD3_LANE_FUNC (uint16x8x3_t, uint16_t, 8h, h, u16, q)
+__LD3_LANE_FUNC (uint32x4x3_t, uint32_t, 4s, s, u32, q)
+__LD3_LANE_FUNC (uint64x2x3_t, uint64_t, 2d, d, u64, q)
+
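+/* Four-register (LD4R) analogue of the vld2{q}_dup_<type> wrappers above,
+   using the fixed registers v16-v19.  */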
+#define __LD4R_FUNC(rettype, structtype, ptrtype,                      \
+                   regsuffix, funcsuffix, Q)                           \
+  __extension__ static __inline rettype                                        \
+  __attribute__ ((__always_inline__))                                  \
+  vld4 ## Q ## _dup_ ## funcsuffix (const ptrtype *ptr)                        \
+  {                                                                    \
+    rettype result;                                                    \
+    __asm__ ("ld4r {v16." #regsuffix " - v19." #regsuffix "}, %1\n\t"  \
+            "st1 {v16." #regsuffix " - v19." #regsuffix "}, %0\n\t"    \
+            : "=Q"(result)                                             \
+            : "Q"(*(const structtype *)ptr)                            \
+            : "memory", "v16", "v17", "v18", "v19");                   \
+    return result;                                                     \
+  }
+
+__LD4R_FUNC (float32x2x4_t, float32x4_t, float32_t, 2s, f32,)
+__LD4R_FUNC (float64x1x4_t, float64x4_t, float64_t, 1d, f64,)
+__LD4R_FUNC (poly8x8x4_t, poly8x4_t, poly8_t, 8b, p8,)
+__LD4R_FUNC (poly16x4x4_t, poly16x4_t, poly16_t, 4h, p16,)
+__LD4R_FUNC (int8x8x4_t, int8x4_t, int8_t, 8b, s8,)
+__LD4R_FUNC (int16x4x4_t, int16x4_t, int16_t, 4h, s16,)
+__LD4R_FUNC (int32x2x4_t, int32x4_t, int32_t, 2s, s32,)
+__LD4R_FUNC (int64x1x4_t, int64x4_t, int64_t, 1d, s64,)
+__LD4R_FUNC (uint8x8x4_t, uint8x4_t, uint8_t, 8b, u8,)
+__LD4R_FUNC (uint16x4x4_t, uint16x4_t, uint16_t, 4h, u16,)
+__LD4R_FUNC (uint32x2x4_t, uint32x4_t, uint32_t, 2s, u32,)
+__LD4R_FUNC (uint64x1x4_t, uint64x4_t, uint64_t, 1d, u64,)
+__LD4R_FUNC (float32x4x4_t, float32x4_t, float32_t, 4s, f32, q)
+__LD4R_FUNC (float64x2x4_t, float64x4_t, float64_t, 2d, f64, q)
+__LD4R_FUNC (poly8x16x4_t, poly8x4_t, poly8_t, 16b, p8, q)
+__LD4R_FUNC (poly16x8x4_t, poly16x4_t, poly16_t, 8h, p16, q)
+__LD4R_FUNC (int8x16x4_t, int8x4_t, int8_t, 16b, s8, q)
+__LD4R_FUNC (int16x8x4_t, int16x4_t, int16_t, 8h, s16, q)
+__LD4R_FUNC (int32x4x4_t, int32x4_t, int32_t, 4s, s32, q)
+__LD4R_FUNC (int64x2x4_t, int64x4_t, int64_t, 2d, s64, q)
+__LD4R_FUNC (uint8x16x4_t, uint8x4_t, uint8_t, 16b, u8, q)
+__LD4R_FUNC (uint16x8x4_t, uint16x4_t, uint16_t, 8h, u16, q)
+__LD4R_FUNC (uint32x4x4_t, uint32x4_t, uint32_t, 4s, u32, q)
+__LD4R_FUNC (uint64x2x4_t, uint64x4_t, uint64_t, 2d, u64, q)
+
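+/* Four-register analogue of the vld2{q}_lane_<type> wrappers above.  */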
+#define __LD4_LANE_FUNC(rettype, ptrtype, regsuffix,                   \
+                       lnsuffix, funcsuffix, Q)                        \
+  __extension__ static __inline rettype                                        \
+  __attribute__ ((__always_inline__))                                  \
+  vld4 ## Q ## _lane_ ## funcsuffix (const ptrtype *ptr,               \
+                                    rettype b, const int c)            \
+  {                                                                    \
+    rettype result;                                                    \
+    __asm__ ("ld1 {v16." #regsuffix " - v19." #regsuffix "}, %1\n\t"   \
+            "ld4 {v16." #lnsuffix " - v19." #lnsuffix "}[%3], %2\n\t"  \
+            "st1 {v16." #regsuffix " - v19." #regsuffix "}, %0\n\t"    \
+            : "=Q"(result)                                             \
+            : "Q"(b), "Q"(*(const rettype *)ptr), "i"(c)               \
+            : "memory", "v16", "v17", "v18", "v19");                   \
+    return result;                                                     \
+  }
+
+__LD4_LANE_FUNC (int8x8x4_t, int8_t, 8b, b, s8,)
+__LD4_LANE_FUNC (float32x2x4_t, float32_t, 2s, s, f32,)
+__LD4_LANE_FUNC (float64x1x4_t, float64_t, 1d, d, f64,)
+__LD4_LANE_FUNC (poly8x8x4_t, poly8_t, 8b, b, p8,)
+__LD4_LANE_FUNC (poly16x4x4_t, poly16_t, 4h, h, p16,)
+__LD4_LANE_FUNC (int16x4x4_t, int16_t, 4h, h, s16,)
+__LD4_LANE_FUNC (int32x2x4_t, int32_t, 2s, s, s32,)
+__LD4_LANE_FUNC (int64x1x4_t, int64_t, 1d, d, s64,)
+__LD4_LANE_FUNC (uint8x8x4_t, uint8_t, 8b, b, u8,)
+__LD4_LANE_FUNC (uint16x4x4_t, uint16_t, 4h, h, u16,)
+__LD4_LANE_FUNC (uint32x2x4_t, uint32_t, 2s, s, u32,)
+__LD4_LANE_FUNC (uint64x1x4_t, uint64_t, 1d, d, u64,)
+__LD4_LANE_FUNC (float32x4x4_t, float32_t, 4s, s, f32, q)
+__LD4_LANE_FUNC (float64x2x4_t, float64_t, 2d, d, f64, q)
+__LD4_LANE_FUNC (poly8x16x4_t, poly8_t, 16b, b, p8, q)
+__LD4_LANE_FUNC (poly16x8x4_t, poly16_t, 8h, h, p16, q)
+__LD4_LANE_FUNC (int8x16x4_t, int8_t, 16b, b, s8, q)
+__LD4_LANE_FUNC (int16x8x4_t, int16_t, 8h, h, s16, q)
+__LD4_LANE_FUNC (int32x4x4_t, int32_t, 4s, s, s32, q)
+__LD4_LANE_FUNC (int64x2x4_t, int64_t, 2d, d, s64, q)
+__LD4_LANE_FUNC (uint8x16x4_t, uint8_t, 16b, b, u8, q)
+__LD4_LANE_FUNC (uint16x8x4_t, uint16_t, 8h, h, u16, q)
+__LD4_LANE_FUNC (uint32x4x4_t, uint32_t, 4s, s, u32, q)
+__LD4_LANE_FUNC (uint64x2x4_t, uint64_t, 2d, d, u64, q)
+
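+/* Define vst2{q}_lane_<type>: the two vectors of B are transferred into the
+   fixed registers v16/v17 with LD1, and ST2 then stores lane C of the pair,
+   interleaved, to memory at PTR.  */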
+#define __ST2_LANE_FUNC(intype, ptrtype, regsuffix,                    \
+                       lnsuffix, funcsuffix, Q)                        \
+  __extension__ static __inline void                                   \
+  __attribute__ ((__always_inline__))                                  \
+  vst2 ## Q ## _lane_ ## funcsuffix (const ptrtype *ptr,               \
+                                    intype b, const int c)             \
+  {                                                                    \
+    __asm__ ("ld1 {v16." #regsuffix ", v17." #regsuffix "}, %1\n\t"    \
+            "st2 {v16." #lnsuffix ", v17." #lnsuffix "}[%2], %0\n\t"   \
+            : "=Q"(*(intype *) ptr)                                    \
+            : "Q"(b), "i"(c)                                           \
+            : "memory", "v16", "v17");                                 \
+  }
+
+__ST2_LANE_FUNC (int8x8x2_t, int8_t, 8b, b, s8,)
+__ST2_LANE_FUNC (float32x2x2_t, float32_t, 2s, s, f32,)
+__ST2_LANE_FUNC (float64x1x2_t, float64_t, 1d, d, f64,)
+__ST2_LANE_FUNC (poly8x8x2_t, poly8_t, 8b, b, p8,)
+__ST2_LANE_FUNC (poly16x4x2_t, poly16_t, 4h, h, p16,)
+__ST2_LANE_FUNC (int16x4x2_t, int16_t, 4h, h, s16,)
+__ST2_LANE_FUNC (int32x2x2_t, int32_t, 2s, s, s32,)
+__ST2_LANE_FUNC (int64x1x2_t, int64_t, 1d, d, s64,)
+__ST2_LANE_FUNC (uint8x8x2_t, uint8_t, 8b, b, u8,)
+__ST2_LANE_FUNC (uint16x4x2_t, uint16_t, 4h, h, u16,)
+__ST2_LANE_FUNC (uint32x2x2_t, uint32_t, 2s, s, u32,)
+__ST2_LANE_FUNC (uint64x1x2_t, uint64_t, 1d, d, u64,)
+__ST2_LANE_FUNC (float32x4x2_t, float32_t, 4s, s, f32, q)
+__ST2_LANE_FUNC (float64x2x2_t, float64_t, 2d, d, f64, q)
+__ST2_LANE_FUNC (poly8x16x2_t, poly8_t, 16b, b, p8, q)
+__ST2_LANE_FUNC (poly16x8x2_t, poly16_t, 8h, h, p16, q)
+__ST2_LANE_FUNC (int8x16x2_t, int8_t, 16b, b, s8, q)
+__ST2_LANE_FUNC (int16x8x2_t, int16_t, 8h, h, s16, q)
+__ST2_LANE_FUNC (int32x4x2_t, int32_t, 4s, s, s32, q)
+__ST2_LANE_FUNC (int64x2x2_t, int64_t, 2d, d, s64, q)
+__ST2_LANE_FUNC (uint8x16x2_t, uint8_t, 16b, b, u8, q)
+__ST2_LANE_FUNC (uint16x8x2_t, uint16_t, 8h, h, u16, q)
+__ST2_LANE_FUNC (uint32x4x2_t, uint32_t, 4s, s, u32, q)
+__ST2_LANE_FUNC (uint64x2x2_t, uint64_t, 2d, d, u64, q)
+
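+/* Three-register analogue of the vst2{q}_lane_<type> wrappers above.  */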
+#define __ST3_LANE_FUNC(intype, ptrtype, regsuffix,                    \
+                       lnsuffix, funcsuffix, Q)                        \
+  __extension__ static __inline void                                   \
+  __attribute__ ((__always_inline__))                                  \
+  vst3 ## Q ## _lane_ ## funcsuffix (const ptrtype *ptr,               \
+                                    intype b, const int c)             \
+  {                                                                    \
+    __asm__ ("ld1 {v16." #regsuffix " - v18." #regsuffix "}, %1\n\t"   \
+            "st3 {v16." #lnsuffix " - v18." #lnsuffix "}[%2], %0\n\t"  \
+            : "=Q"(*(intype *) ptr)                                    \
+            : "Q"(b), "i"(c)                                           \
+            : "memory", "v16", "v17", "v18");                          \
+  }
+
+__ST3_LANE_FUNC (int8x8x3_t, int8_t, 8b, b, s8,)
+__ST3_LANE_FUNC (float32x2x3_t, float32_t, 2s, s, f32,)
+__ST3_LANE_FUNC (float64x1x3_t, float64_t, 1d, d, f64,)
+__ST3_LANE_FUNC (poly8x8x3_t, poly8_t, 8b, b, p8,)
+__ST3_LANE_FUNC (poly16x4x3_t, poly16_t, 4h, h, p16,)
+__ST3_LANE_FUNC (int16x4x3_t, int16_t, 4h, h, s16,)
+__ST3_LANE_FUNC (int32x2x3_t, int32_t, 2s, s, s32,)
+__ST3_LANE_FUNC (int64x1x3_t, int64_t, 1d, d, s64,)
+__ST3_LANE_FUNC (uint8x8x3_t, uint8_t, 8b, b, u8,)
+__ST3_LANE_FUNC (uint16x4x3_t, uint16_t, 4h, h, u16,)
+__ST3_LANE_FUNC (uint32x2x3_t, uint32_t, 2s, s, u32,)
+__ST3_LANE_FUNC (uint64x1x3_t, uint64_t, 1d, d, u64,)
+__ST3_LANE_FUNC (float32x4x3_t, float32_t, 4s, s, f32, q)
+__ST3_LANE_FUNC (float64x2x3_t, float64_t, 2d, d, f64, q)
+__ST3_LANE_FUNC (poly8x16x3_t, poly8_t, 16b, b, p8, q)
+__ST3_LANE_FUNC (poly16x8x3_t, poly16_t, 8h, h, p16, q)
+__ST3_LANE_FUNC (int8x16x3_t, int8_t, 16b, b, s8, q)
+__ST3_LANE_FUNC (int16x8x3_t, int16_t, 8h, h, s16, q)
+__ST3_LANE_FUNC (int32x4x3_t, int32_t, 4s, s, s32, q)
+__ST3_LANE_FUNC (int64x2x3_t, int64_t, 2d, d, s64, q)
+__ST3_LANE_FUNC (uint8x16x3_t, uint8_t, 16b, b, u8, q)
+__ST3_LANE_FUNC (uint16x8x3_t, uint16_t, 8h, h, u16, q)
+__ST3_LANE_FUNC (uint32x4x3_t, uint32_t, 4s, s, u32, q)
+__ST3_LANE_FUNC (uint64x2x3_t, uint64_t, 2d, d, u64, q)
+
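+/* Four-register analogue of the vst2{q}_lane_<type> wrappers above.  */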
+#define __ST4_LANE_FUNC(intype, ptrtype, regsuffix,                    \
+                       lnsuffix, funcsuffix, Q)                        \
+  __extension__ static __inline void                                   \
+  __attribute__ ((__always_inline__))                                  \
+  vst4 ## Q ## _lane_ ## funcsuffix (const ptrtype *ptr,               \
+                                    intype b, const int c)             \
+  {                                                                    \
+    __asm__ ("ld1 {v16." #regsuffix " - v19." #regsuffix "}, %1\n\t"   \
+            "st4 {v16." #lnsuffix " - v19." #lnsuffix "}[%2], %0\n\t"  \
+            : "=Q"(*(intype *) ptr)                                    \
+            : "Q"(b), "i"(c)                                           \
+            : "memory", "v16", "v17", "v18", "v19");                   \
+  }
+
+__ST4_LANE_FUNC (int8x8x4_t, int8_t, 8b, b, s8,)
+__ST4_LANE_FUNC (float32x2x4_t, float32_t, 2s, s, f32,)
+__ST4_LANE_FUNC (float64x1x4_t, float64_t, 1d, d, f64,)
+__ST4_LANE_FUNC (poly8x8x4_t, poly8_t, 8b, b, p8,)
+__ST4_LANE_FUNC (poly16x4x4_t, poly16_t, 4h, h, p16,)
+__ST4_LANE_FUNC (int16x4x4_t, int16_t, 4h, h, s16,)
+__ST4_LANE_FUNC (int32x2x4_t, int32_t, 2s, s, s32,)
+__ST4_LANE_FUNC (int64x1x4_t, int64_t, 1d, d, s64,)
+__ST4_LANE_FUNC (uint8x8x4_t, uint8_t, 8b, b, u8,)
+__ST4_LANE_FUNC (uint16x4x4_t, uint16_t, 4h, h, u16,)
+__ST4_LANE_FUNC (uint32x2x4_t, uint32_t, 2s, s, u32,)
+__ST4_LANE_FUNC (uint64x1x4_t, uint64_t, 1d, d, u64,)
+__ST4_LANE_FUNC (float32x4x4_t, float32_t, 4s, s, f32, q)
+__ST4_LANE_FUNC (float64x2x4_t, float64_t, 2d, d, f64, q)
+__ST4_LANE_FUNC (poly8x16x4_t, poly8_t, 16b, b, p8, q)
+__ST4_LANE_FUNC (poly16x8x4_t, poly16_t, 8h, h, p16, q)
+__ST4_LANE_FUNC (int8x16x4_t, int8_t, 16b, b, s8, q)
+__ST4_LANE_FUNC (int16x8x4_t, int16_t, 8h, h, s16, q)
+__ST4_LANE_FUNC (int32x4x4_t, int32_t, 4s, s, s32, q)
+__ST4_LANE_FUNC (int64x2x4_t, int64_t, 2d, d, s64, q)
+__ST4_LANE_FUNC (uint8x16x4_t, uint8_t, 16b, b, u8, q)
+__ST4_LANE_FUNC (uint16x8x4_t, uint16_t, 8h, h, u16, q)
+__ST4_LANE_FUNC (uint32x4x4_t, uint32_t, 4s, s, u32, q)
+__ST4_LANE_FUNC (uint64x2x4_t, uint64_t, 2d, d, u64, q)
+
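+/* Reductions over two-element vectors: a single pairwise instruction
+   combines both lanes of the input.  */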
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vaddlv_s32 (int32x2_t a)
+{
+  int64_t result;
+  __asm__ ("saddlp %0.1d, %1.2s" : "=w"(result) : "w"(a) : );
+  return result;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vaddlv_u32 (uint32x2_t a)
+{
+  uint64_t result;
+  __asm__ ("uaddlp %0.1d, %1.2s" : "=w"(result) : "w"(a) : );
+  return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vaddv_s32 (int32x2_t a)
+{
+  int32_t result;
+  __asm__ ("addp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
+  return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vaddv_u32 (uint32x2_t a)
+{
+  uint32_t result;
+  __asm__ ("addp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vmaxnmv_f32 (float32x2_t a)
+{
+  float32_t result;
+  __asm__ ("fmaxnmp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
+  return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vminnmv_f32 (float32x2_t a)
+{
+  float32_t result;
+  __asm__ ("fminnmp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
+  return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vmaxnmvq_f64 (float64x2_t a)
+{
+  float64_t result;
+  __asm__ ("fmaxnmp %0.2d, %1.2d, %1.2d" : "=w"(result) : "w"(a) : );
+  return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vmaxv_s32 (int32x2_t a)
+{
+  int32_t result;
+  __asm__ ("smaxp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
+  return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vmaxv_u32 (uint32x2_t a)
+{
+  uint32_t result;
+  __asm__ ("umaxp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
+  return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vminnmvq_f64 (float64x2_t a)
+{
+  float64_t result;
+  __asm__ ("fminnmp %0.2d, %1.2d, %1.2d" : "=w"(result) : "w"(a) : );
+  return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vminv_s32 (int32x2_t a)
+{
+  int32_t result;
+  __asm__ ("sminp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
+  return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vminv_u32 (uint32x2_t a)
+{
+  uint32_t result;
+  __asm__ ("uminp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
+  return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vpaddd_s64 (int64x2_t __a)
+{
+  return __builtin_aarch64_addpdi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
+{
+  return __builtin_aarch64_sqdmulh_lanev4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
+{
+  return __builtin_aarch64_sqdmulh_lanev2si (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+  return __builtin_aarch64_sqdmulh_lanev8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+  return __builtin_aarch64_sqdmulh_lanev4si (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
+{
+  return  __builtin_aarch64_sqrdmulh_lanev4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
+{
+  return __builtin_aarch64_sqrdmulh_lanev2si (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+  return __builtin_aarch64_sqrdmulh_lanev8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+  return __builtin_aarch64_sqrdmulh_lanev4si (__a, __b, __c);
+}
+
+/* Table intrinsics.  */
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbl1_p8 (poly8x16_t a, uint8x8_t b)
+{
+  poly8x8_t result;
+  __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbl1_s8 (int8x16_t a, int8x8_t b)
+{
+  int8x8_t result;
+  __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbl1_u8 (uint8x16_t a, uint8x8_t b)
+{
+  uint8x8_t result;
+  __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbl1q_p8 (poly8x16_t a, uint8x16_t b)
+{
+  poly8x16_t result;
+  __asm__ ("tbl %0.16b, {%1.16b}, %2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbl1q_s8 (int8x16_t a, int8x16_t b)
+{
+  int8x16_t result;
+  __asm__ ("tbl %0.16b, {%1.16b}, %2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbl1q_u8 (uint8x16_t a, uint8x16_t b)
+{
+  uint8x16_t result;
+  __asm__ ("tbl %0.16b, {%1.16b}, %2.16b"
+           : "=w"(result)
+           : "w"(a), "w"(b)
+           : /* No clobbers */);
+  return result;
+}
+
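+/* The multi-register TBL forms require the table to live in consecutive
+   vector registers, so the wrappers below first load the table structure
+   from memory into the fixed registers v16/v17 (v16-v18 for the
+   three-register forms) before issuing TBL.  */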
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbl2_s8 (int8x16x2_t tab, int8x8_t idx)
+{
+  int8x8_t result;
+  __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+          "tbl %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
+          :"=w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17");
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbl2_u8 (uint8x16x2_t tab, uint8x8_t idx)
+{
+  uint8x8_t result;
+  __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+          "tbl %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
+          :"=w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17");
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbl2_p8 (poly8x16x2_t tab, uint8x8_t idx)
+{
+  poly8x8_t result;
+  __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+          "tbl %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
+          :"=w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17");
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbl2q_s8 (int8x16x2_t tab, int8x16_t idx)
+{
+  int8x16_t result;
+  __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+          "tbl %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
+          :"=w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17");
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbl2q_u8 (uint8x16x2_t tab, uint8x16_t idx)
+{
+  uint8x16_t result;
+  __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+          "tbl %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
+          :"=w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17");
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbl2q_p8 (poly8x16x2_t tab, uint8x16_t idx)
+{
+  poly8x16_t result;
+  __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+          "tbl %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
+          :"=w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17");
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbl3_s8 (int8x16x3_t tab, int8x8_t idx)
+{
+  int8x8_t result;
+  __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+          "tbl %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
+          :"=w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18");
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbl3_u8 (uint8x16x3_t tab, uint8x8_t idx)
+{
+  uint8x8_t result;
+  __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+          "tbl %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
+          :"=w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18");
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbl3_p8 (poly8x16x3_t tab, uint8x8_t idx)
+{
+  poly8x8_t result;
+  __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+          "tbl %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
+          :"=w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18");
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbl3q_s8 (int8x16x3_t tab, int8x16_t idx)
+{
+  int8x16_t result;
+  __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+          "tbl %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
+          :"=w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18");
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbl3q_u8 (uint8x16x3_t tab, uint8x16_t idx)
+{
+  uint8x16_t result;
+  __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+          "tbl %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
+          :"=w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18");
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbl3q_p8 (poly8x16x3_t tab, uint8x16_t idx)
+{
+  poly8x16_t result;
+  __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+          "tbl %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
+          :"=w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18");
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbl4_s8 (int8x16x4_t tab, int8x8_t idx)
+{
+  int8x8_t result;
+  __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+          "tbl %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
+          :"=w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18", "v19");
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbl4_u8 (uint8x16x4_t tab, uint8x8_t idx)
+{
+  uint8x8_t result;
+  __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+          "tbl %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
+          :"=w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18", "v19");
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbl4_p8 (poly8x16x4_t tab, uint8x8_t idx)
+{
+  poly8x8_t result;
+  __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+          "tbl %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
+          :"=w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18", "v19");
+  return result;
+}
+
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbl4q_s8 (int8x16x4_t tab, int8x16_t idx)
+{
+  int8x16_t result;
+  __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+          "tbl %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
+          :"=w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18", "v19");
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbl4q_u8 (uint8x16x4_t tab, uint8x16_t idx)
+{
+  uint8x16_t result;
+  __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+          "tbl %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
+          :"=w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18", "v19");
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbl4q_p8 (poly8x16x4_t tab, uint8x16_t idx)
+{
+  poly8x16_t result;
+  __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+          "tbl %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
+          :"=w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18", "v19");
+  return result;
+}
+
+
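+/* vqtbx differs from vqtbl only in its out-of-range behaviour: result
+   bytes whose index does not address the table keep the value of the
+   first (destination) argument instead of being zeroed.  */
+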
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbx1_s8 (int8x8_t r, int8x16_t tab, int8x8_t idx)
+{
+  int8x8_t result = r;
+  __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
+           : "+w"(result)
+           : "w"(tab), "w"(idx)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbx1_u8 (uint8x8_t r, uint8x16_t tab, uint8x8_t idx)
+{
+  uint8x8_t result = r;
+  __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
+           : "+w"(result)
+           : "w"(tab), "w"(idx)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbx1_p8 (poly8x8_t r, poly8x16_t tab, uint8x8_t idx)
+{
+  poly8x8_t result = r;
+  __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
+           : "+w"(result)
+           : "w"(tab), "w"(idx)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbx1q_s8 (int8x16_t r, int8x16_t tab, int8x16_t idx)
+{
+  int8x16_t result = r;
+  __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
+           : "+w"(result)
+           : "w"(tab), "w"(idx)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbx1q_u8 (uint8x16_t r, uint8x16_t tab, uint8x16_t idx)
+{
+  uint8x16_t result = r;
+  __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
+           : "+w"(result)
+           : "w"(tab), "w"(idx)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbx1q_p8 (poly8x16_t r, poly8x16_t tab, uint8x16_t idx)
+{
+  poly8x16_t result = r;
+  __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
+           : "+w"(result)
+           : "w"(tab), "w"(idx)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbx2_s8 (int8x8_t r, int8x16x2_t tab, int8x8_t idx)
+{
+  int8x8_t result = r;
+  __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+          "tbx %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
+          :"+w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17");
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbx2_u8 (uint8x8_t r, uint8x16x2_t tab, uint8x8_t idx)
+{
+  uint8x8_t result = r;
+  __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+          "tbx %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
+          :"+w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17");
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbx2_p8 (poly8x8_t r, poly8x16x2_t tab, uint8x8_t idx)
+{
+  poly8x8_t result = r;
+  __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+          "tbx %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
+          :"+w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17");
+  return result;
+}
+
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbx2q_s8 (int8x16_t r, int8x16x2_t tab, int8x16_t idx)
+{
+  int8x16_t result = r;
+  __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+          "tbx %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
+          :"+w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17");
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbx2q_u8 (uint8x16_t r, uint8x16x2_t tab, uint8x16_t idx)
+{
+  uint8x16_t result = r;
+  __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+          "tbx %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
+          :"+w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17");
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbx2q_p8 (poly8x16_t r, poly8x16x2_t tab, uint8x16_t idx)
+{
+  poly8x16_t result = r;
+  __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+          "tbx %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
+          :"+w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17");
+  return result;
+}
+
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbx3_s8 (int8x8_t r, int8x16x3_t tab, int8x8_t idx)
+{
+  int8x8_t result = r;
+  __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+          "tbx %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
+          :"+w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18");
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbx3_u8 (uint8x8_t r, uint8x16x3_t tab, uint8x8_t idx)
+{
+  uint8x8_t result = r;
+  __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+          "tbx %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
+          :"+w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18");
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbx3_p8 (poly8x8_t r, poly8x16x3_t tab, uint8x8_t idx)
+{
+  poly8x8_t result = r;
+  __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+          "tbx %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
+          :"+w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18");
+  return result;
+}
+
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbx3q_s8 (int8x16_t r, int8x16x3_t tab, int8x16_t idx)
+{
+  int8x16_t result = r;
+  __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+          "tbx %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
+          :"+w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18");
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbx3q_u8 (uint8x16_t r, uint8x16x3_t tab, uint8x16_t idx)
+{
+  uint8x16_t result = r;
+  __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+          "tbx %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
+          :"+w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18");
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbx3q_p8 (poly8x16_t r, poly8x16x3_t tab, uint8x16_t idx)
+{
+  poly8x16_t result = r;
+  __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+          "tbx %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
+          :"+w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18");
+  return result;
+}
+
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbx4_s8 (int8x8_t r, int8x16x4_t tab, int8x8_t idx)
+{
+  int8x8_t result = r;
+  __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+          "tbx %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
+          :"+w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18", "v19");
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbx4_u8 (uint8x8_t r, uint8x16x4_t tab, uint8x8_t idx)
+{
+  uint8x8_t result = r;
+  __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+          "tbx %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
+          :"+w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18", "v19");
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbx4_p8 (poly8x8_t r, poly8x16x4_t tab, uint8x8_t idx)
+{
+  poly8x8_t result = r;
+  __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+          "tbx %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
+          :"+w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18", "v19");
+  return result;
+}
+
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbx4q_s8 (int8x16_t r, int8x16x4_t tab, int8x16_t idx)
+{
+  int8x16_t result = r;
+  __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+          "tbx %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
+          :"+w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18", "v19");
+  return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbx4q_u8 (uint8x16_t r, uint8x16x4_t tab, uint8x16_t idx)
+{
+  uint8x16_t result = r;
+  __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+          "tbx %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
+          :"+w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18", "v19");
+  return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbx4q_p8 (poly8x16_t r, poly8x16x4_t tab, uint8x16_t idx)
+{
+  poly8x16_t result = r;
+  __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+          "tbx %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
+          :"+w"(result)
+          :"Q"(tab),"w"(idx)
+          :"memory", "v16", "v17", "v18", "v19");
+  return result;
+}
+
+/* V7 legacy table intrinsics.  */
+
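+/* AArch32 VTBL and VTBX index 8-byte table registers, whereas AArch64
+   TBL and TBX index 16-byte registers.  The compatibility intrinsics
+   below therefore combine the 64-bit table halves into 128-bit registers
+   with vcombine, zero-padding the final half when the number of 8-byte
+   tables is odd, so that out-of-range indices still read as zero under
+   TBL.  */
+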
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl1_s8 (int8x8_t tab, int8x8_t idx)
+{
+  int8x8_t result;
+  int8x16_t temp = vcombine_s8 (tab, vcreate_s8 (UINT64_C (0x0)));
+  __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+           : "=w"(result)
+           : "w"(temp), "w"(idx)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl1_u8 (uint8x8_t tab, uint8x8_t idx)
+{
+  uint8x8_t result;
+  uint8x16_t temp = vcombine_u8 (tab, vcreate_u8 (UINT64_C (0x0)));
+  __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+           : "=w"(result)
+           : "w"(temp), "w"(idx)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl1_p8 (poly8x8_t tab, uint8x8_t idx)
+{
+  poly8x8_t result;
+  poly8x16_t temp = vcombine_p8 (tab, vcreate_p8 (UINT64_C (0x0)));
+  __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+           : "=w"(result)
+           : "w"(temp), "w"(idx)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl2_s8 (int8x8x2_t tab, int8x8_t idx)
+{
+  int8x8_t result;
+  int8x16_t temp = vcombine_s8 (tab.val[0], tab.val[1]);
+  __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+           : "=w"(result)
+           : "w"(temp), "w"(idx)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl2_u8 (uint8x8x2_t tab, uint8x8_t idx)
+{
+  uint8x8_t result;
+  uint8x16_t temp = vcombine_u8 (tab.val[0], tab.val[1]);
+  __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+           : "=w"(result)
+           : "w"(temp), "w"(idx)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl2_p8 (poly8x8x2_t tab, uint8x8_t idx)
+{
+  poly8x8_t result;
+  poly8x16_t temp = vcombine_p8 (tab.val[0], tab.val[1]);
+  __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+           : "=w"(result)
+           : "w"(temp), "w"(idx)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl3_s8 (int8x8x3_t tab, int8x8_t idx)
+{
+  int8x8_t result;
+  int8x16x2_t temp;
+  temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]);
+  temp.val[1] = vcombine_s8 (tab.val[2], vcreate_s8 (UINT64_C (0x0)));
+  __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
+          "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+           : "=w"(result)
+           : "Q"(temp), "w"(idx)
+           : "v16", "v17", "memory");
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl3_u8 (uint8x8x3_t tab, uint8x8_t idx)
+{
+  uint8x8_t result;
+  uint8x16x2_t temp;
+  temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]);
+  temp.val[1] = vcombine_u8 (tab.val[2], vcreate_u8 (UINT64_C (0x0)));
+  __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
+          "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+           : "=w"(result)
+           : "Q"(temp), "w"(idx)
+           : "v16", "v17", "memory");
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl3_p8 (poly8x8x3_t tab, uint8x8_t idx)
+{
+  poly8x8_t result;
+  poly8x16x2_t temp;
+  temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]);
+  temp.val[1] = vcombine_p8 (tab.val[2], vcreate_p8 (UINT64_C (0x0)));
+  __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
+          "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+           : "=w"(result)
+           : "Q"(temp), "w"(idx)
+           : "v16", "v17", "memory");
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl4_s8 (int8x8x4_t tab, int8x8_t idx)
+{
+  int8x8_t result;
+  int8x16x2_t temp;
+  temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]);
+  temp.val[1] = vcombine_s8 (tab.val[2], tab.val[3]);
+  __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
+          "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+           : "=w"(result)
+           : "Q"(temp), "w"(idx)
+           : "v16", "v17", "memory");
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl4_u8 (uint8x8x4_t tab, uint8x8_t idx)
+{
+  uint8x8_t result;
+  uint8x16x2_t temp;
+  temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]);
+  temp.val[1] = vcombine_u8 (tab.val[2], tab.val[3]);
+  __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
+          "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+           : "=w"(result)
+           : "Q"(temp), "w"(idx)
+           : "v16", "v17", "memory");
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl4_p8 (poly8x8x4_t tab, uint8x8_t idx)
+{
+  poly8x8_t result;
+  poly8x16x2_t temp;
+  temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]);
+  temp.val[1] = vcombine_p8 (tab.val[2], tab.val[3]);
+  __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
+          "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+           : "=w"(result)
+           : "Q"(temp), "w"(idx)
+           : "v16", "v17", "memory");
+  return result;
+}
+
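+/* vtbx2 and vtbx4 can use TBX directly, because their combined tables
+   fill whole 16-byte registers.  vtbx1 and vtbx3 cannot: TBX over the
+   zero-padded table would write zero, rather than preserve the
+   destination, for indices that address the padding.  They are instead
+   emulated by building a mask of out-of-range indices with CMHS against
+   the real table length (8 or 24 bytes) and selecting the original
+   destination bytes there with BSL.  */
+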
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx1_s8 (int8x8_t r, int8x8_t tab, int8x8_t idx)
+{
+  int8x8_t result;
+  int8x8_t tmp1;
+  int8x16_t temp = vcombine_s8 (tab, vcreate_s8 (UINT64_C (0x0)));
+  __asm__ ("movi %0.8b, 8\n\t"
+          "cmhs %0.8b, %3.8b, %0.8b\n\t"
+          "tbl %1.8b, {%2.16b}, %3.8b\n\t"
+          "bsl %0.8b, %4.8b, %1.8b\n\t"
+           : "+w"(result), "=w"(tmp1)
+           : "w"(temp), "w"(idx), "w"(r)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx1_u8 (uint8x8_t r, uint8x8_t tab, uint8x8_t idx)
+{
+  uint8x8_t result;
+  uint8x8_t tmp1;
+  uint8x16_t temp = vcombine_u8 (tab, vcreate_u8 (UINT64_C (0x0)));
+  __asm__ ("movi %0.8b, 8\n\t"
+          "cmhs %0.8b, %3.8b, %0.8b\n\t"
+          "tbl %1.8b, {%2.16b}, %3.8b\n\t"
+          "bsl %0.8b, %4.8b, %1.8b\n\t"
+           : "+w"(result), "=w"(tmp1)
+           : "w"(temp), "w"(idx), "w"(r)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx1_p8 (poly8x8_t r, poly8x8_t tab, uint8x8_t idx)
+{
+  poly8x8_t result;
+  poly8x8_t tmp1;
+  poly8x16_t temp = vcombine_p8 (tab, vcreate_p8 (UINT64_C (0x0)));
+  __asm__ ("movi %0.8b, 8\n\t"
+          "cmhs %0.8b, %3.8b, %0.8b\n\t"
+          "tbl %1.8b, {%2.16b}, %3.8b\n\t"
+          "bsl %0.8b, %4.8b, %1.8b\n\t"
+           : "+w"(result), "=w"(tmp1)
+           : "w"(temp), "w"(idx), "w"(r)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx2_s8 (int8x8_t r, int8x8x2_t tab, int8x8_t idx)
+{
+  int8x8_t result = r;
+  int8x16_t temp = vcombine_s8 (tab.val[0], tab.val[1]);
+  __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
+           : "+w"(result)
+           : "w"(temp), "w"(idx)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx2_u8 (uint8x8_t r, uint8x8x2_t tab, uint8x8_t idx)
+{
+  uint8x8_t result = r;
+  uint8x16_t temp = vcombine_u8 (tab.val[0], tab.val[1]);
+  __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
+           : "+w"(result)
+           : "w"(temp), "w"(idx)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx2_p8 (poly8x8_t r, poly8x8x2_t tab, uint8x8_t idx)
+{
+  poly8x8_t result = r;
+  poly8x16_t temp = vcombine_p8 (tab.val[0], tab.val[1]);
+  __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
+           : "+w"(result)
+           : "w"(temp), "w"(idx)
+           : /* No clobbers */);
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx3_s8 (int8x8_t r, int8x8x3_t tab, int8x8_t idx)
+{
+  int8x8_t result;
+  int8x8_t tmp1;
+  int8x16x2_t temp;
+  temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]);
+  temp.val[1] = vcombine_s8 (tab.val[2], vcreate_s8 (UINT64_C (0x0)));
+  __asm__ ("ld1 {v16.16b - v17.16b}, %2\n\t"
+          "movi %0.8b, 24\n\t"
+          "cmhs %0.8b, %3.8b, %0.8b\n\t"
+          "tbl %1.8b, {v16.16b - v17.16b}, %3.8b\n\t"
+          "bsl %0.8b, %4.8b, %1.8b\n\t"
+           : "+w"(result), "=w"(tmp1)
+           : "Q"(temp), "w"(idx), "w"(r)
+           : "v16", "v17", "memory");
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx3_u8 (uint8x8_t r, uint8x8x3_t tab, uint8x8_t idx)
+{
+  uint8x8_t result;
+  uint8x8_t tmp1;
+  uint8x16x2_t temp;
+  temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]);
+  temp.val[1] = vcombine_u8 (tab.val[2], vcreate_u8 (UINT64_C (0x0)));
+  __asm__ ("ld1 {v16.16b - v17.16b}, %2\n\t"
+          "movi %0.8b, 24\n\t"
+          "cmhs %0.8b, %3.8b, %0.8b\n\t"
+          "tbl %1.8b, {v16.16b - v17.16b}, %3.8b\n\t"
+          "bsl %0.8b, %4.8b, %1.8b\n\t"
+           : "+w"(result), "=w"(tmp1)
+           : "Q"(temp), "w"(idx), "w"(r)
+           : "v16", "v17", "memory");
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx3_p8 (poly8x8_t r, poly8x8x3_t tab, uint8x8_t idx)
+{
+  poly8x8_t result;
+  poly8x8_t tmp1;
+  poly8x16x2_t temp;
+  temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]);
+  temp.val[1] = vcombine_p8 (tab.val[2], vcreate_p8 (UINT64_C (0x0)));
+  __asm__ ("ld1 {v16.16b - v17.16b}, %2\n\t"
+          "movi %0.8b, 24\n\t"
+          "cmhs %0.8b, %3.8b, %0.8b\n\t"
+          "tbl %1.8b, {v16.16b - v17.16b}, %3.8b\n\t"
+          "bsl %0.8b, %4.8b, %1.8b\n\t"
+           : "+w"(result), "=w"(tmp1)
+           : "Q"(temp), "w"(idx), "w"(r)
+           : "v16", "v17", "memory");
+  return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx4_s8 (int8x8_t r, int8x8x4_t tab, int8x8_t idx)
+{
+  int8x8_t result = r;
+  int8x16x2_t temp;
+  temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]);
+  temp.val[1] = vcombine_s8 (tab.val[2], tab.val[3]);
+  __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
+          "tbx %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+           : "+w"(result)
+           : "Q"(temp), "w"(idx)
+           : "v16", "v17", "memory");
+  return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx4_u8 (uint8x8_t r, uint8x8x4_t tab, uint8x8_t idx)
+{
+  uint8x8_t result = r;
+  uint8x16x2_t temp;
+  temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]);
+  temp.val[1] = vcombine_u8 (tab.val[2], tab.val[3]);
+  __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
+          "tbx %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+           : "+w"(result)
+           : "Q"(temp), "w"(idx)
+           : "v16", "v17", "memory");
+  return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx4_p8 (poly8x8_t r, poly8x8x4_t tab, uint8x8_t idx)
+{
+  poly8x8_t result = r;
+  poly8x16x2_t temp;
+  temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]);
+  temp.val[1] = vcombine_p8 (tab.val[2], tab.val[3]);
+  __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
+          "tbx %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+           : "+w"(result)
+           : "Q"(temp), "w"(idx)
+           : "v16", "v17", "memory");
+  return result;
+}
+
+/* End of temporary inline asm.  */
+
+/* Start of optimal implementations in approved order.  */
+
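+/* From here on each intrinsic expands to a __builtin_aarch64_* builtin,
+   or to plain C arithmetic, rather than to inline asm.  Unsigned and
+   polynomial variants cast their operands to the signed vector types the
+   builtins are declared over and cast the result back.  */
+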
+/* vadd */
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vaddd_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vaddd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return __a + __b;
+}
+
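+/* Comparison intrinsics.  Each result lane is all ones when the
+   comparison holds and all zeros otherwise.  Signed comparisons map onto
+   the cmeq/cmge/cmgt builtins, unsigned ones onto cmhs/cmhi, and the
+   vector vcle and vclt forms simply swap the operands of the
+   corresponding vcge or vcgt builtin.  */
+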
+/* vceq */
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceq_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_cmeqv8qi ((int8x8_t) __a,
+                                                (int8x8_t) __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceq_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_cmeqv8qi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vceq_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_cmeqv4hi (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceq_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_cmeqv2si (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceq_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmeqdi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceq_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_cmeqv8qi ((int8x8_t) __a,
+                                                (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vceq_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_cmeqv4hi ((int16x4_t) __a,
+                                                 (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceq_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_cmeqv2si ((int32x2_t) __a,
+                                                 (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceq_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmeqdi ((int64x1_t) __a,
+                                               (int64x1_t) __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_cmeqv16qi ((int8x16_t) __a,
+                                                  (int8x16_t) __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_cmeqv16qi (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vceqq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_cmeqv8hi (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_cmeqv4si (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vceqq_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_cmeqv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_cmeqv16qi ((int8x16_t) __a,
+                                                  (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vceqq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_cmeqv8hi ((int16x8_t) __a,
+                                                 (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_cmeqv4si ((int32x4_t) __a,
+                                                 (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vceqq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_cmeqv2di ((int64x2_t) __a,
+                                                 (int64x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceqd_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmeqdi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceqd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmeqdi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceqzd_s64 (int64x1_t __a)
+{
+  return (uint64x1_t) __builtin_aarch64_cmeqdi (__a, 0);
+}
+
+/* vcge */
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcge_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_cmgev8qi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcge_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_cmgev4hi (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcge_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_cmgev2si (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcge_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmgedi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcge_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_cmhsv8qi ((int8x8_t) __a,
+                                                (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcge_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_cmhsv4hi ((int16x4_t) __a,
+                                                 (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcge_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_cmhsv2si ((int32x2_t) __a,
+                                                 (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcge_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmhsdi ((int64x1_t) __a,
+                                               (int64x1_t) __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgeq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_cmgev16qi (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgeq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_cmgev8hi (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgeq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_cmgev4si (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgeq_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_cmgev2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgeq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_cmhsv16qi ((int8x16_t) __a,
+                                                  (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgeq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_cmhsv8hi ((int16x8_t) __a,
+                                                 (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgeq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_cmhsv4si ((int32x4_t) __a,
+                                                 (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgeq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_cmhsv2di ((int64x2_t) __a,
+                                                 (int64x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcged_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmgedi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcged_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmhsdi ((int64x1_t) __a,
+                                               (int64x1_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgezd_s64 (int64x1_t __a)
+{
+  return (uint64x1_t) __builtin_aarch64_cmgedi (__a, 0);
+}
+
+/* vcgt */
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcgt_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_cmgtv8qi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcgt_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_cmgtv4hi (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgt_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_cmgtv2si (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgt_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmgtdi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcgt_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_cmhiv8qi ((int8x8_t) __a,
+                                                (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcgt_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_cmhiv4hi ((int16x4_t) __a,
+                                                 (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgt_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_cmhiv2si ((int32x2_t) __a,
+                                                 (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgt_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmhidi ((int64x1_t) __a,
+                                               (int64x1_t) __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgtq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_cmgtv16qi (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgtq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_cmgtv8hi (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_cmgtv4si (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgtq_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_cmgtv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgtq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_cmhiv16qi ((int8x16_t) __a,
+                                                  (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgtq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_cmhiv8hi ((int16x8_t) __a,
+                                                 (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_cmhiv4si ((int32x4_t) __a,
+                                                 (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgtq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_cmhiv2di ((int64x2_t) __a,
+                                                 (int64x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgtd_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmgtdi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgtd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmhidi ((int64x1_t) __a,
+                                               (int64x1_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgtzd_s64 (int64x1_t __a)
+{
+  return (uint64x1_t) __builtin_aarch64_cmgtdi (__a, 0);
+}
+
+/* vcle */
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcle_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_cmgev8qi (__b, __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcle_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_cmgev4hi (__b, __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcle_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_cmgev2si (__b, __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcle_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmgedi (__b, __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcle_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_cmhsv8qi ((int8x8_t) __b,
+                                                (int8x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcle_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_cmhsv4hi ((int16x4_t) __b,
+                                                 (int16x4_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcle_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_cmhsv2si ((int32x2_t) __b,
+                                                 (int32x2_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcle_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmhsdi ((int64x1_t) __b,
+                                               (int64x1_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcleq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_cmgev16qi (__b, __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcleq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_cmgev8hi (__b, __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcleq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_cmgev4si (__b, __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcleq_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_cmgev2di (__b, __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcleq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_cmhsv16qi ((int8x16_t) __b,
+                                                  (int8x16_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcleq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_cmhsv8hi ((int16x8_t) __b,
+                                                 (int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcleq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_cmhsv4si ((int32x4_t) __b,
+                                                 (int32x4_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcleq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_cmhsv2di ((int64x2_t) __b,
+                                                 (int64x2_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcled_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmgedi (__b, __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vclezd_s64 (int64x1_t __a)
+{
+  return (uint64x1_t) __builtin_aarch64_cmledi (__a, 0);
+}
+
+/* vclt */
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclt_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_cmgtv8qi (__b, __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vclt_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_cmgtv4hi (__b, __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclt_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_cmgtv2si (__b, __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vclt_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmgtdi (__b, __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclt_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_cmhiv8qi ((int8x8_t) __b,
+                                                (int8x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vclt_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_cmhiv4hi ((int16x4_t) __b,
+                                                 (int16x4_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclt_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_cmhiv2si ((int32x2_t) __b,
+                                                 (int32x2_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vclt_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmhidi ((int64x1_t) __b,
+                                               (int64x1_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcltq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_cmgtv16qi (__b, __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcltq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_cmgtv8hi (__b, __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcltq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_cmgtv4si (__b, __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcltq_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_cmgtv2di (__b, __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcltq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_cmhiv16qi ((int8x16_t) __b,
+                                                  (int8x16_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcltq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_cmhiv8hi ((int16x8_t) __b,
+                                                 (int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcltq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_cmhiv4si ((int32x4_t) __b,
+                                                 (int32x4_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcltq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_cmhiv2di ((int64x2_t) __b,
+                                                 (int64x2_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcltd_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmgtdi (__b, __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcltzd_s64 (int64x1_t __a)
+{
+  return (uint64x1_t) __builtin_aarch64_cmltdi (__a, 0);
+}
+
+/* vdup */
+
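+/* The vdup<size>_lane intrinsics read one lane of a 128-bit vector into
+   the corresponding scalar-in-vector type (int8x1_t, uint16x1_t, ...)
+   via the dup_lane builtins; the lane index is expected to be a
+   compile-time constant.  */
+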
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vdupb_lane_s8 (int8x16_t a, int const b)
+{
+  return __builtin_aarch64_dup_laneqi (a, b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vdupb_lane_u8 (uint8x16_t a, int const b)
+{
+  return (uint8x1_t) __builtin_aarch64_dup_laneqi ((int8x16_t) a, b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vduph_lane_s16 (int16x8_t a, int const b)
+{
+  return __builtin_aarch64_dup_lanehi (a, b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vduph_lane_u16 (uint16x8_t a, int const b)
+{
+  return (uint16x1_t) __builtin_aarch64_dup_lanehi ((int16x8_t) a, b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vdups_lane_s32 (int32x4_t a, int const b)
+{
+  return __builtin_aarch64_dup_lanesi (a, b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vdups_lane_u32 (uint32x4_t a, int const b)
+{
+  return (uint32x1_t) __builtin_aarch64_dup_lanesi ((int32x4_t) a, b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vdupd_lane_s64 (int64x2_t a, int const b)
+{
+  return __builtin_aarch64_dup_lanedi (a, b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vdupd_lane_u64 (uint64x2_t a, int const b)
+{
+  return (uint64x1_t) __builtin_aarch64_dup_lanedi ((int64x2_t) a, b);
+}
+
+/* vldn */
+
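+/* The vldn intrinsics load n-way interleaved data into a structure of n
+   vectors, so for example vld2q_u8 returns elements 0, 2, 4, ... of the
+   source in val[0] and elements 1, 3, 5, ... in val[1].  Each expands to
+   an ld<n> builtin returning an opaque register tuple (a
+   __builtin_aarch64_simd_oi value for pairs, a _ci value for triples),
+   from which the individual D- or Q-registers are extracted with the
+   get_dreg and get_qreg builtins and cast to the element type.  */
+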
+__extension__ static __inline int64x1x2_t __attribute__ ((__always_inline__))
+vld2_s64 (const int64_t * __a)
+{
+  int64x1x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 0);
+  ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline uint64x1x2_t __attribute__ ((__always_inline__))
+vld2_u64 (const uint64_t * __a)
+{
+  uint64x1x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 0);
+  ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline float64x1x2_t __attribute__ ((__always_inline__))
+vld2_f64 (const float64_t * __a)
+{
+  float64x1x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2df ((const __builtin_aarch64_simd_df *) __a);
+  ret.val[0] = (float64x1_t) __builtin_aarch64_get_dregoidf (__o, 0);
+  ret.val[1] = (float64x1_t) __builtin_aarch64_get_dregoidf (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vld2_s8 (const int8_t * __a)
+{
+  int8x8x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v8qi ((const __builtin_aarch64_simd_qi *) __a);
+  ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
+  ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vld2_p8 (const poly8_t * __a)
+{
+  poly8x8x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v8qi ((const __builtin_aarch64_simd_qi *) __a);
+  ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
+  ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vld2_s16 (const int16_t * __a)
+{
+  int16x4x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v4hi ((const __builtin_aarch64_simd_hi *) __a);
+  ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
+  ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vld2_p16 (const poly16_t * __a)
+{
+  poly16x4x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v4hi ((const __builtin_aarch64_simd_hi *) __a);
+  ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
+  ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vld2_s32 (const int32_t * __a)
+{
+  int32x2x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v2si ((const __builtin_aarch64_simd_si *) __a);
+  ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 0);
+  ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vld2_u8 (const uint8_t * __a)
+{
+  uint8x8x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v8qi ((const __builtin_aarch64_simd_qi *) __a);
+  ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
+  ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vld2_u16 (const uint16_t * __a)
+{
+  uint16x4x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v4hi ((const __builtin_aarch64_simd_hi *) __a);
+  ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
+  ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vld2_u32 (const uint32_t * __a)
+{
+  uint32x2x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v2si ((const __builtin_aarch64_simd_si *) __a);
+  ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 0);
+  ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vld2_f32 (const float32_t * __a)
+{
+  float32x2x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v2sf ((const __builtin_aarch64_simd_sf *) __a);
+  ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregoiv2sf (__o, 0);
+  ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregoiv2sf (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+vld2q_s8 (const int8_t * __a)
+{
+  int8x16x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v16qi ((const __builtin_aarch64_simd_qi *) __a);
+  ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
+  ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+vld2q_p8 (const poly8_t * __a)
+{
+  poly8x16x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v16qi ((const __builtin_aarch64_simd_qi *) __a);
+  ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
+  ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vld2q_s16 (const int16_t * __a)
+{
+  int16x8x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v8hi ((const __builtin_aarch64_simd_hi *) __a);
+  ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
+  ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vld2q_p16 (const poly16_t * __a)
+{
+  poly16x8x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v8hi ((const __builtin_aarch64_simd_hi *) __a);
+  ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
+  ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vld2q_s32 (const int32_t * __a)
+{
+  int32x4x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v4si ((const __builtin_aarch64_simd_si *) __a);
+  ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
+  ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline int64x2x2_t __attribute__ ((__always_inline__))
+vld2q_s64 (const int64_t * __a)
+{
+  int64x2x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v2di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 0);
+  ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+vld2q_u8 (const uint8_t * __a)
+{
+  uint8x16x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v16qi ((const __builtin_aarch64_simd_qi *) __a);
+  ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
+  ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vld2q_u16 (const uint16_t * __a)
+{
+  uint16x8x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v8hi ((const __builtin_aarch64_simd_hi *) __a);
+  ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
+  ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vld2q_u32 (const uint32_t * __a)
+{
+  uint32x4x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v4si ((const __builtin_aarch64_simd_si *) __a);
+  ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
+  ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline uint64x2x2_t __attribute__ ((__always_inline__))
+vld2q_u64 (const uint64_t * __a)
+{
+  uint64x2x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v2di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 0);
+  ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vld2q_f32 (const float32_t * __a)
+{
+  float32x4x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v4sf ((const __builtin_aarch64_simd_sf *) __a);
+  ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregoiv4sf (__o, 0);
+  ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregoiv4sf (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline float64x2x2_t __attribute__ ((__always_inline__))
+vld2q_f64 (const float64_t * __a)
+{
+  float64x2x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v2df ((const __builtin_aarch64_simd_df *) __a);
+  ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregoiv2df (__o, 0);
+  ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregoiv2df (__o, 1);
+  return ret;
+}
+
+__extension__ static __inline int64x1x3_t __attribute__ ((__always_inline__))
+vld3_s64 (const int64_t * __a)
+{
+  int64x1x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 0);
+  ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 1);
+  ret.val[2] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline uint64x1x3_t __attribute__ ((__always_inline__))
+vld3_u64 (const uint64_t * __a)
+{
+  uint64x1x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 0);
+  ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 1);
+  ret.val[2] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline float64x1x3_t __attribute__ ((__always_inline__))
+vld3_f64 (const float64_t * __a)
+{
+  float64x1x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3df ((const __builtin_aarch64_simd_df *) __a);
+  ret.val[0] = (float64x1_t) __builtin_aarch64_get_dregcidf (__o, 0);
+  ret.val[1] = (float64x1_t) __builtin_aarch64_get_dregcidf (__o, 1);
+  ret.val[2] = (float64x1_t) __builtin_aarch64_get_dregcidf (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline int8x8x3_t __attribute__ ((__always_inline__))
+vld3_s8 (const int8_t * __a)
+{
+  int8x8x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v8qi ((const __builtin_aarch64_simd_qi *) __a);
+  ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
+  ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
+  ret.val[2] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline poly8x8x3_t __attribute__ ((__always_inline__))
+vld3_p8 (const poly8_t * __a)
+{
+  poly8x8x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v8qi ((const __builtin_aarch64_simd_qi *) __a);
+  ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
+  ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
+  ret.val[2] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline int16x4x3_t __attribute__ ((__always_inline__))
+vld3_s16 (const int16_t * __a)
+{
+  int16x4x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v4hi ((const __builtin_aarch64_simd_hi *) __a);
+  ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
+  ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
+  ret.val[2] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline poly16x4x3_t __attribute__ ((__always_inline__))
+vld3_p16 (const poly16_t * __a)
+{
+  poly16x4x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v4hi ((const __builtin_aarch64_simd_hi *) __a);
+  ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
+  ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
+  ret.val[2] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline int32x2x3_t __attribute__ ((__always_inline__))
+vld3_s32 (const int32_t * __a)
+{
+  int32x2x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v2si ((const __builtin_aarch64_simd_si *) __a);
+  ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 0);
+  ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 1);
+  ret.val[2] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline uint8x8x3_t __attribute__ ((__always_inline__))
+vld3_u8 (const uint8_t * __a)
+{
+  uint8x8x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v8qi ((const __builtin_aarch64_simd_qi *) __a);
+  ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
+  ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
+  ret.val[2] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline uint16x4x3_t __attribute__ ((__always_inline__))
+vld3_u16 (const uint16_t * __a)
+{
+  uint16x4x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v4hi ((const __builtin_aarch64_simd_hi *) __a);
+  ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
+  ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
+  ret.val[2] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline uint32x2x3_t __attribute__ ((__always_inline__))
+vld3_u32 (const uint32_t * __a)
+{
+  uint32x2x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v2si ((const __builtin_aarch64_simd_si *) __a);
+  ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 0);
+  ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 1);
+  ret.val[2] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline float32x2x3_t __attribute__ ((__always_inline__))
+vld3_f32 (const float32_t * __a)
+{
+  float32x2x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v2sf ((const __builtin_aarch64_simd_sf *) __a);
+  ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 0);
+  ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 1);
+  ret.val[2] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline int8x16x3_t __attribute__ ((__always_inline__))
+vld3q_s8 (const int8_t * __a)
+{
+  int8x16x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v16qi ((const __builtin_aarch64_simd_qi *) __a);
+  ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
+  ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
+  ret.val[2] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline poly8x16x3_t __attribute__ ((__always_inline__))
+vld3q_p8 (const poly8_t * __a)
+{
+  poly8x16x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v16qi ((const __builtin_aarch64_simd_qi *) __a);
+  ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
+  ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
+  ret.val[2] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline int16x8x3_t __attribute__ ((__always_inline__))
+vld3q_s16 (const int16_t * __a)
+{
+  int16x8x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v8hi ((const __builtin_aarch64_simd_hi *) __a);
+  ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
+  ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
+  ret.val[2] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline poly16x8x3_t __attribute__ ((__always_inline__))
+vld3q_p16 (const poly16_t * __a)
+{
+  poly16x8x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v8hi ((const __builtin_aarch64_simd_hi *) __a);
+  ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
+  ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
+  ret.val[2] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline int32x4x3_t __attribute__ ((__always_inline__))
+vld3q_s32 (const int32_t * __a)
+{
+  int32x4x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v4si ((const __builtin_aarch64_simd_si *) __a);
+  ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0);
+  ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1);
+  ret.val[2] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline int64x2x3_t __attribute__ ((__always_inline__))
+vld3q_s64 (const int64_t * __a)
+{
+  int64x2x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v2di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 0);
+  ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 1);
+  ret.val[2] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline uint8x16x3_t __attribute__ ((__always_inline__))
+vld3q_u8 (const uint8_t * __a)
+{
+  uint8x16x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v16qi ((const __builtin_aarch64_simd_qi *) __a);
+  ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
+  ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
+  ret.val[2] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline uint16x8x3_t __attribute__ ((__always_inline__))
+vld3q_u16 (const uint16_t * __a)
+{
+  uint16x8x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v8hi ((const __builtin_aarch64_simd_hi *) __a);
+  ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
+  ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
+  ret.val[2] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline uint32x4x3_t __attribute__ ((__always_inline__))
+vld3q_u32 (const uint32_t * __a)
+{
+  uint32x4x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v4si ((const __builtin_aarch64_simd_si *) __a);
+  ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0);
+  ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1);
+  ret.val[2] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline uint64x2x3_t __attribute__ ((__always_inline__))
+vld3q_u64 (const uint64_t * __a)
+{
+  uint64x2x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v2di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 0);
+  ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 1);
+  ret.val[2] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline float32x4x3_t __attribute__ ((__always_inline__))
+vld3q_f32 (const float32_t * __a)
+{
+  float32x4x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v4sf ((const __builtin_aarch64_simd_sf *) __a);
+  ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 0);
+  ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 1);
+  ret.val[2] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline float64x2x3_t __attribute__ ((__always_inline__))
+vld3q_f64 (const float64_t * __a)
+{
+  float64x2x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v2df ((const __builtin_aarch64_simd_df *) __a);
+  ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 0);
+  ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 1);
+  ret.val[2] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 2);
+  return ret;
+}
+
+__extension__ static __inline int64x1x4_t __attribute__ ((__always_inline__))
+vld4_s64 (const int64_t * __a)
+{
+  int64x1x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 0);
+  ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 1);
+  ret.val[2] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 2);
+  ret.val[3] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline uint64x1x4_t __attribute__ ((__always_inline__))
+vld4_u64 (const uint64_t * __a)
+{
+  uint64x1x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 0);
+  ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 1);
+  ret.val[2] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 2);
+  ret.val[3] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline float64x1x4_t __attribute__ ((__always_inline__))
+vld4_f64 (const float64_t * __a)
+{
+  float64x1x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4df ((const __builtin_aarch64_simd_df *) __a);
+  ret.val[0] = (float64x1_t) __builtin_aarch64_get_dregxidf (__o, 0);
+  ret.val[1] = (float64x1_t) __builtin_aarch64_get_dregxidf (__o, 1);
+  ret.val[2] = (float64x1_t) __builtin_aarch64_get_dregxidf (__o, 2);
+  ret.val[3] = (float64x1_t) __builtin_aarch64_get_dregxidf (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline int8x8x4_t __attribute__ ((__always_inline__))
+vld4_s8 (const int8_t * __a)
+{
+  int8x8x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v8qi ((const __builtin_aarch64_simd_qi *) __a);
+  ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0);
+  ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1);
+  ret.val[2] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2);
+  ret.val[3] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline poly8x8x4_t __attribute__ ((__always_inline__))
+vld4_p8 (const poly8_t * __a)
+{
+  poly8x8x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v8qi ((const __builtin_aarch64_simd_qi *) __a);
+  ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0);
+  ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1);
+  ret.val[2] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2);
+  ret.val[3] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline int16x4x4_t __attribute__ ((__always_inline__))
+vld4_s16 (const int16_t * __a)
+{
+  int16x4x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v4hi ((const __builtin_aarch64_simd_hi *) __a);
+  ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0);
+  ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1);
+  ret.val[2] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2);
+  ret.val[3] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline poly16x4x4_t __attribute__ ((__always_inline__))
+vld4_p16 (const poly16_t * __a)
+{
+  poly16x4x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v4hi ((const __builtin_aarch64_simd_hi *) __a);
+  ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0);
+  ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1);
+  ret.val[2] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2);
+  ret.val[3] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline int32x2x4_t __attribute__ ((__always_inline__))
+vld4_s32 (const int32_t * __a)
+{
+  int32x2x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v2si ((const __builtin_aarch64_simd_si *) __a);
+  ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 0);
+  ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 1);
+  ret.val[2] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 2);
+  ret.val[3] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline uint8x8x4_t __attribute__ ((__always_inline__))
+vld4_u8 (const uint8_t * __a)
+{
+  uint8x8x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v8qi ((const __builtin_aarch64_simd_qi *) __a);
+  ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0);
+  ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1);
+  ret.val[2] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2);
+  ret.val[3] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline uint16x4x4_t __attribute__ ((__always_inline__))
+vld4_u16 (const uint16_t * __a)
+{
+  uint16x4x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v4hi ((const __builtin_aarch64_simd_hi *) __a);
+  ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0);
+  ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1);
+  ret.val[2] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2);
+  ret.val[3] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline uint32x2x4_t __attribute__ ((__always_inline__))
+vld4_u32 (const uint32_t * __a)
+{
+  uint32x2x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v2si ((const __builtin_aarch64_simd_si *) __a);
+  ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 0);
+  ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 1);
+  ret.val[2] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 2);
+  ret.val[3] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline float32x2x4_t __attribute__ ((__always_inline__))
+vld4_f32 (const float32_t * __a)
+{
+  float32x2x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v2sf ((const __builtin_aarch64_simd_sf *) __a);
+  ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 0);
+  ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 1);
+  ret.val[2] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 2);
+  ret.val[3] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline int8x16x4_t __attribute__ ((__always_inline__))
+vld4q_s8 (const int8_t * __a)
+{
+  int8x16x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v16qi ((const __builtin_aarch64_simd_qi *) __a);
+  ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0);
+  ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1);
+  ret.val[2] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2);
+  ret.val[3] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline poly8x16x4_t __attribute__ ((__always_inline__))
+vld4q_p8 (const poly8_t * __a)
+{
+  poly8x16x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v16qi ((const __builtin_aarch64_simd_qi *) __a);
+  ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0);
+  ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1);
+  ret.val[2] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2);
+  ret.val[3] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline int16x8x4_t __attribute__ ((__always_inline__))
+vld4q_s16 (const int16_t * __a)
+{
+  int16x8x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v8hi ((const __builtin_aarch64_simd_hi *) __a);
+  ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0);
+  ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1);
+  ret.val[2] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2);
+  ret.val[3] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline poly16x8x4_t __attribute__ ((__always_inline__))
+vld4q_p16 (const poly16_t * __a)
+{
+  poly16x8x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v8hi ((const __builtin_aarch64_simd_hi *) __a);
+  ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0);
+  ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1);
+  ret.val[2] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2);
+  ret.val[3] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline int32x4x4_t __attribute__ ((__always_inline__))
+vld4q_s32 (const int32_t * __a)
+{
+  int32x4x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v4si ((const __builtin_aarch64_simd_si *) __a);
+  ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 0);
+  ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 1);
+  ret.val[2] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 2);
+  ret.val[3] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline int64x2x4_t __attribute__ ((__always_inline__))
+vld4q_s64 (const int64_t * __a)
+{
+  int64x2x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v2di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 0);
+  ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 1);
+  ret.val[2] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 2);
+  ret.val[3] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline uint8x16x4_t __attribute__ ((__always_inline__))
+vld4q_u8 (const uint8_t * __a)
+{
+  uint8x16x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v16qi ((const __builtin_aarch64_simd_qi *) __a);
+  ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0);
+  ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1);
+  ret.val[2] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2);
+  ret.val[3] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline uint16x8x4_t __attribute__ ((__always_inline__))
+vld4q_u16 (const uint16_t * __a)
+{
+  uint16x8x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v8hi ((const __builtin_aarch64_simd_hi *) __a);
+  ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0);
+  ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1);
+  ret.val[2] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2);
+  ret.val[3] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline uint32x4x4_t __attribute__ ((__always_inline__))
+vld4q_u32 (const uint32_t * __a)
+{
+  uint32x4x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v4si ((const __builtin_aarch64_simd_si *) __a);
+  ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 0);
+  ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 1);
+  ret.val[2] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 2);
+  ret.val[3] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline uint64x2x4_t __attribute__ ((__always_inline__))
+vld4q_u64 (const uint64_t * __a)
+{
+  uint64x2x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v2di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 0);
+  ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 1);
+  ret.val[2] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 2);
+  ret.val[3] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline float32x4x4_t __attribute__ ((__always_inline__))
+vld4q_f32 (const float32_t * __a)
+{
+  float32x4x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v4sf ((const __builtin_aarch64_simd_sf *) __a);
+  ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 0);
+  ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 1);
+  ret.val[2] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 2);
+  ret.val[3] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 3);
+  return ret;
+}
+
+__extension__ static __inline float64x2x4_t __attribute__ ((__always_inline__))
+vld4q_f64 (const float64_t * __a)
+{
+  float64x2x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v2df ((const __builtin_aarch64_simd_df *) __a);
+  ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 0);
+  ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 1);
+  ret.val[2] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 2);
+  ret.val[3] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 3);
+  return ret;
+}
+
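+/* Note: the vld2/vld3/vld4 (and vld2q/vld3q/vld4q) intrinsics above map to
+   the LD2/LD3/LD4 structure loads; they read 2, 3 or 4 interleaved vectors
+   from memory and de-interleave them into the val[] members of the returned
+   structure.  A rough usage sketch, where __buf is a placeholder for a
+   suitably sized float32_t array:
+
+     float32x4x2_t __p = vld2q_f32 (__buf);
+
+   leaves the even-indexed elements in __p.val[0] and the odd-indexed
+   elements in __p.val[1].  */
+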
+/* vmax */
+
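+/* vmax returns the element-wise maximum of its two operands.  The unsigned
+   variants cast through the signed builtins, but the comparison itself is
+   still performed as unsigned.  */
+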
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmax_f32 (float32x2_t __a, float32x2_t __b)
+{
+  return __builtin_aarch64_fmaxv2sf (__a, __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmax_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return __builtin_aarch64_smaxv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmax_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return __builtin_aarch64_smaxv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmax_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return __builtin_aarch64_smaxv2si (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmax_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_umaxv8qi ((int8x8_t) __a,
+                                                (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmax_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_umaxv4hi ((int16x4_t) __a,
+                                                 (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmax_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_umaxv2si ((int32x2_t) __a,
+                                                 (int32x2_t) __b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmaxq_f32 (float32x4_t __a, float32x4_t __b)
+{
+  return __builtin_aarch64_fmaxv4sf (__a, __b);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmaxq_f64 (float64x2_t __a, float64x2_t __b)
+{
+  return __builtin_aarch64_fmaxv2df (__a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmaxq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return __builtin_aarch64_smaxv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmaxq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return __builtin_aarch64_smaxv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmaxq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return __builtin_aarch64_smaxv4si (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmaxq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_umaxv16qi ((int8x16_t) __a,
+                                                  (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmaxq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_umaxv8hi ((int16x8_t) __a,
+                                                 (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_umaxv4si ((int32x4_t) __a,
+                                                 (int32x4_t) __b);
+}
+
+/* vmin */
+
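+/* vmin returns the element-wise minimum; the variants mirror those of vmax
+   above.  */
+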
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmin_f32 (float32x2_t __a, float32x2_t __b)
+{
+  return __builtin_aarch64_fminv2sf (__a, __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmin_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return __builtin_aarch64_sminv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmin_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return __builtin_aarch64_sminv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmin_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return __builtin_aarch64_sminv2si (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmin_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_uminv8qi ((int8x8_t) __a,
+                                                (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmin_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_uminv4hi ((int16x4_t) __a,
+                                                 (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmin_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_uminv2si ((int32x2_t) __a,
+                                                 (int32x2_t) __b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vminq_f32 (float32x4_t __a, float32x4_t __b)
+{
+  return __builtin_aarch64_fminv4sf (__a, __b);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vminq_f64 (float64x2_t __a, float64x2_t __b)
+{
+  return __builtin_aarch64_fminv2df (__a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vminq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return __builtin_aarch64_sminv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vminq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return __builtin_aarch64_sminv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vminq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return __builtin_aarch64_sminv4si (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vminq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_uminv16qi ((int8x16_t) __a,
+                                                  (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vminq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_uminv8hi ((int16x8_t) __a,
+                                                 (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vminq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_uminv4si ((int32x4_t) __a,
+                                                 (int32x4_t) __b);
+}
+
+/* vmla */
+
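+/* The float vmla forms below are expressed directly as a + b * c, i.e. a
+   plain multiply followed by an add.  */
+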
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmla_f32 (float32x2_t a, float32x2_t b, float32x2_t c)
+{
+  return a + b * c;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlaq_f32 (float32x4_t a, float32x4_t b, float32x4_t c)
+{
+  return a + b * c;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmlaq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
+{
+  return a + b * c;
+}
+
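+/* vmls */
+
+/* The float vmls forms are the multiply-subtract counterparts, expressed
+   directly as a - b * c.  */
+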
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmls_f32 (float32x2_t a, float32x2_t b, float32x2_t c)
+{
+  return a - b * c;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlsq_f32 (float32x4_t a, float32x4_t b, float32x4_t c)
+{
+  return a - b * c;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmlsq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
+{
+  return a - b * c;
+}
+
+/* vqabs */
+
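+/* vqabs is a saturating absolute value: the most negative input does not
+   wrap but clamps to the type maximum, e.g. vqabsb_s8 (-128) yields 127.  */
+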
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqabsq_s64 (int64x2_t __a)
+{
+  return (int64x2_t) __builtin_aarch64_sqabsv2di (__a);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqabsb_s8 (int8x1_t __a)
+{
+  return (int8x1_t) __builtin_aarch64_sqabsqi (__a);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqabsh_s16 (int16x1_t __a)
+{
+  return (int16x1_t) __builtin_aarch64_sqabshi (__a);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqabss_s32 (int32x1_t __a)
+{
+  return (int32x1_t) __builtin_aarch64_sqabssi (__a);
+}
+
+/* vqadd */
+
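+/* vqadd performs saturating addition: results that would overflow clamp to
+   the limits of the element type instead of wrapping, e.g.
+   vqaddb_u8 (250, 10) yields 255.  */
+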
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqaddb_s8 (int8x1_t __a, int8x1_t __b)
+{
+  return (int8x1_t) __builtin_aarch64_sqaddqi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqaddh_s16 (int16x1_t __a, int16x1_t __b)
+{
+  return (int16x1_t) __builtin_aarch64_sqaddhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqadds_s32 (int32x1_t __a, int32x1_t __b)
+{
+  return (int32x1_t) __builtin_aarch64_sqaddsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqaddd_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (int64x1_t) __builtin_aarch64_sqadddi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqaddb_u8 (uint8x1_t __a, uint8x1_t __b)
+{
+  return (uint8x1_t) __builtin_aarch64_uqaddqi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqaddh_u16 (uint16x1_t __a, uint16x1_t __b)
+{
+  return (uint16x1_t) __builtin_aarch64_uqaddhi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqadds_u32 (uint32x1_t __a, uint32x1_t __b)
+{
+  return (uint32x1_t) __builtin_aarch64_uqaddsi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqaddd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_uqadddi (__a, __b);
+}
+
+/* vqdmlal */
+
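+/* vqdmlal is the signed saturating doubling multiply-accumulate long:
+   roughly a + saturate (2 * b * c), widening 16-bit lanes to 32 bits (or
+   32-bit lanes to 64 bits).  The _high variants use the upper half of the
+   128-bit inputs, the _lane/_laneq variants multiply by a selected lane,
+   and the _n variants multiply by a broadcast scalar.  The _lane forms
+   below widen the 64-bit __c operand with vcombine before calling the
+   builtin.  */
+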
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+  return __builtin_aarch64_sqdmlalv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
+{
+  return __builtin_aarch64_sqdmlal2v8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_high_lane_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c,
+                      int const __d)
+{
+  return __builtin_aarch64_sqdmlal2_lanev8hi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_high_laneq_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c,
+                       int const __d)
+{
+  return __builtin_aarch64_sqdmlal2_laneqv8hi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
+{
+  return __builtin_aarch64_sqdmlal2_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, int const __d)
+{
+  int16x8_t __tmp = vcombine_s16 (__c, vcreate_s16 (INT64_C (0)));
+  return __builtin_aarch64_sqdmlal_lanev4hi (__a, __b, __tmp, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_laneq_s16 (int32x4_t __a, int16x4_t __b, int16x8_t __c, int const __d)
+{
+  return __builtin_aarch64_sqdmlal_laneqv4hi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+  return __builtin_aarch64_sqdmlal_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+  return __builtin_aarch64_sqdmlalv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
+{
+  return __builtin_aarch64_sqdmlal2v4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_high_lane_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c,
+                      int const __d)
+{
+  return __builtin_aarch64_sqdmlal2_lanev4si (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_high_laneq_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c,
+                       int const __d)
+{
+  return __builtin_aarch64_sqdmlal2_laneqv4si (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
+{
+  return __builtin_aarch64_sqdmlal2_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, int const __d)
+{
+  int32x4_t __tmp = vcombine_s32 (__c, vcreate_s32 (INT64_C (0)));
+  return __builtin_aarch64_sqdmlal_lanev2si (__a, __b, __tmp, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_laneq_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c, int const __d)
+{
+  return __builtin_aarch64_sqdmlal_laneqv2si (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+  return __builtin_aarch64_sqdmlal_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmlalh_s16 (int32x1_t __a, int16x1_t __b, int16x1_t __c)
+{
+  return __builtin_aarch64_sqdmlalhi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmlalh_lane_s16 (int32x1_t __a, int16x1_t __b, int16x8_t __c, const int __d)
+{
+  return __builtin_aarch64_sqdmlal_lanehi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqdmlals_s32 (int64x1_t __a, int32x1_t __b, int32x1_t __c)
+{
+  return __builtin_aarch64_sqdmlalsi (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqdmlals_lane_s32 (int64x1_t __a, int32x1_t __b, int32x4_t __c, const int __d)
+{
+  return __builtin_aarch64_sqdmlal_lanesi (__a, __b, __c, __d);
+}
+
+/* vqdmlsl */
+
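+/* vqdmlsl is the multiply-subtract counterpart of vqdmlal above: roughly
+   a - saturate (2 * b * c), with the same _high/_lane/_laneq/_n variants.  */
+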
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+  return __builtin_aarch64_sqdmlslv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
+{
+  return __builtin_aarch64_sqdmlsl2v8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_high_lane_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c,
+                      int const __d)
+{
+  return __builtin_aarch64_sqdmlsl2_lanev8hi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_high_laneq_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c,
+                       int const __d)
+{
+  return __builtin_aarch64_sqdmlsl2_laneqv8hi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
+{
+  return __builtin_aarch64_sqdmlsl2_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, int const __d)
+{
+  int16x8_t __tmp = vcombine_s16 (__c, vcreate_s16 (INT64_C (0)));
+  return __builtin_aarch64_sqdmlsl_lanev4hi (__a, __b, __tmp, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_laneq_s16 (int32x4_t __a, int16x4_t __b, int16x8_t __c, int const __d)
+{
+  return __builtin_aarch64_sqdmlsl_laneqv4hi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+  return __builtin_aarch64_sqdmlsl_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+  return __builtin_aarch64_sqdmlslv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
+{
+  return __builtin_aarch64_sqdmlsl2v4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_high_lane_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c,
+                      int const __d)
+{
+  return __builtin_aarch64_sqdmlsl2_lanev4si (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_high_laneq_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c,
+                       int const __d)
+{
+  return __builtin_aarch64_sqdmlsl2_laneqv4si (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
+{
+  return __builtin_aarch64_sqdmlsl2_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, int const __d)
+{
+  int32x4_t __tmp = vcombine_s32 (__c, vcreate_s32 (INT64_C (0)));
+  return __builtin_aarch64_sqdmlsl_lanev2si (__a, __b, __tmp, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_laneq_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c, int const __d)
+{
+  return __builtin_aarch64_sqdmlsl_laneqv2si (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+  return __builtin_aarch64_sqdmlsl_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmlslh_s16 (int32x1_t __a, int16x1_t __b, int16x1_t __c)
+{
+  return __builtin_aarch64_sqdmlslhi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmlslh_lane_s16 (int32x1_t __a, int16x1_t __b, int16x8_t __c, const int __d)
+{
+  return __builtin_aarch64_sqdmlsl_lanehi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqdmlsls_s32 (int64x1_t __a, int32x1_t __b, int32x1_t __c)
+{
+  return __builtin_aarch64_sqdmlslsi (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqdmlsls_lane_s32 (int64x1_t __a, int32x1_t __b, int32x4_t __c, const int __d)
+{
+  return __builtin_aarch64_sqdmlsl_lanesi (__a, __b, __c, __d);
+}
+
+/* vqdmulh */
+
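+/* vqdmulh is the saturating doubling multiply returning the high half: for
+   16-bit lanes the result is roughly saturate ((2 * a * b) >> 16), so for
+   example vqdmulhh_s16 (0x4000, 0x4000) yields 0x2000.  */
+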
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+  int16x8_t __tmp = vcombine_s16 (__b, vcreate_s16 (INT64_C (0)));
+  return __builtin_aarch64_sqdmulh_lanev4hi (__a, __tmp, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+  int32x4_t __tmp = vcombine_s32 (__b, vcreate_s32 (INT64_C (0)));
+  return __builtin_aarch64_sqdmulh_lanev2si (__a, __tmp, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
+{
+  int16x8_t __tmp = vcombine_s16 (__b, vcreate_s16 (INT64_C (0)));
+  return __builtin_aarch64_sqdmulh_lanev8hi (__a, __tmp, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
+{
+  int32x4_t __tmp = vcombine_s32 (__b, vcreate_s32 (INT64_C (0)));
+  return __builtin_aarch64_sqdmulh_lanev4si (__a, __tmp, __c);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqdmulhh_s16 (int16x1_t __a, int16x1_t __b)
+{
+  return (int16x1_t) __builtin_aarch64_sqdmulhhi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqdmulhh_lane_s16 (int16x1_t __a, int16x8_t __b, const int __c)
+{
+  return __builtin_aarch64_sqdmulh_lanehi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmulhs_s32 (int32x1_t __a, int32x1_t __b)
+{
+  return (int32x1_t) __builtin_aarch64_sqdmulhsi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmulhs_lane_s32 (int32x1_t __a, int32x4_t __b, const int __c)
+{
+  return __builtin_aarch64_sqdmulh_lanesi (__a, __b, __c);
+}
+
+/* vqdmull */
+
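+/* vqdmull is the signed saturating doubling multiply long: the result is
+   roughly saturate (2 * a * b) at twice the input element width; it can
+   only saturate when both inputs are the most negative value.  */
+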
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return __builtin_aarch64_sqdmullv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_high_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return __builtin_aarch64_sqdmull2v8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_high_lane_s16 (int16x8_t __a, int16x8_t __b, int const __c)
+{
+  return __builtin_aarch64_sqdmull2_lanev8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_high_laneq_s16 (int16x8_t __a, int16x8_t __b, int const __c)
+{
+  return __builtin_aarch64_sqdmull2_laneqv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_high_n_s16 (int16x8_t __a, int16_t __b)
+{
+  return __builtin_aarch64_sqdmull2_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_lane_s16 (int16x4_t __a, int16x4_t __b, int const __c)
+{
+  int16x8_t __tmp = vcombine_s16 (__b, vcreate_s16 (INT64_C (0)));
+  return __builtin_aarch64_sqdmull_lanev4hi (__a, __tmp, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_laneq_s16 (int16x4_t __a, int16x8_t __b, int const __c)
+{
+  return __builtin_aarch64_sqdmull_laneqv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_n_s16 (int16x4_t __a, int16_t __b)
+{
+  return __builtin_aarch64_sqdmull_nv4hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return __builtin_aarch64_sqdmullv2si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_high_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return __builtin_aarch64_sqdmull2v4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_high_lane_s32 (int32x4_t __a, int32x4_t __b, int const __c)
+{
+  return __builtin_aarch64_sqdmull2_lanev4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_high_laneq_s32 (int32x4_t __a, int32x4_t __b, int const __c)
+{
+  return __builtin_aarch64_sqdmull2_laneqv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_high_n_s32 (int32x4_t __a, int32_t __b)
+{
+  return __builtin_aarch64_sqdmull2_nv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_lane_s32 (int32x2_t __a, int32x2_t __b, int const __c)
+{
+  int32x4_t __tmp = vcombine_s32 (__b, vcreate_s32 (INT64_C (0)));
+  return __builtin_aarch64_sqdmull_lanev2si (__a, __tmp, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_laneq_s32 (int32x2_t __a, int32x4_t __b, int const __c)
+{
+  return __builtin_aarch64_sqdmull_laneqv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_n_s32 (int32x2_t __a, int32_t __b)
+{
+  return __builtin_aarch64_sqdmull_nv2si (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmullh_s16 (int16x1_t __a, int16x1_t __b)
+{
+  return (int32x1_t) __builtin_aarch64_sqdmullhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmullh_lane_s16 (int16x1_t __a, int16x8_t __b, const int __c)
+{
+  return __builtin_aarch64_sqdmull_lanehi (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqdmulls_s32 (int32x1_t __a, int32x1_t __b)
+{
+  return (int64x1_t) __builtin_aarch64_sqdmullsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqdmulls_lane_s32 (int32x1_t __a, int32x4_t __b, const int __c)
+{
+  return __builtin_aarch64_sqdmull_lanesi (__a, __b, __c);
+}
+
+/* vqmovn */
+
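+/* vqmovn narrows each element to half its width with saturation, e.g.
+   vqmovnh_s16 (300) yields 127 and vqmovnh_s16 (-300) yields -128.  */
+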
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqmovn_s16 (int16x8_t __a)
+{
+  return (int8x8_t) __builtin_aarch64_sqmovnv8hi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqmovn_s32 (int32x4_t __a)
+{
+  return (int16x4_t) __builtin_aarch64_sqmovnv4si (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqmovn_s64 (int64x2_t __a)
+{
+  return (int32x2_t) __builtin_aarch64_sqmovnv2di (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqmovn_u16 (uint16x8_t __a)
+{
+  return (uint8x8_t) __builtin_aarch64_uqmovnv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqmovn_u32 (uint32x4_t __a)
+{
+  return (uint16x4_t) __builtin_aarch64_uqmovnv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqmovn_u64 (uint64x2_t __a)
+{
+  return (uint32x2_t) __builtin_aarch64_uqmovnv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqmovnh_s16 (int16x1_t __a)
+{
+  return (int8x1_t) __builtin_aarch64_sqmovnhi (__a);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqmovns_s32 (int32x1_t __a)
+{
+  return (int16x1_t) __builtin_aarch64_sqmovnsi (__a);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqmovnd_s64 (int64x1_t __a)
+{
+  return (int32x1_t) __builtin_aarch64_sqmovndi (__a);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqmovnh_u16 (uint16x1_t __a)
+{
+  return (uint8x1_t) __builtin_aarch64_uqmovnhi (__a);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqmovns_u32 (uint32x1_t __a)
+{
+  return (uint16x1_t) __builtin_aarch64_uqmovnsi (__a);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqmovnd_u64 (uint64x1_t __a)
+{
+  return (uint32x1_t) __builtin_aarch64_uqmovndi (__a);
+}
+
+/* vqmovun */
+
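+/* vqmovun narrows signed elements to unsigned elements of half the width
+   with saturation: negative inputs clamp to 0 and inputs above the unsigned
+   maximum clamp to it, e.g. a 16-bit value of 300 narrows to 255.  */
+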
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqmovun_s16 (int16x8_t __a)
+{
+  return (uint8x8_t) __builtin_aarch64_sqmovunv8hi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqmovun_s32 (int32x4_t __a)
+{
+  return (uint16x4_t) __builtin_aarch64_sqmovunv4si (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqmovun_s64 (int64x2_t __a)
+{
+  return (uint32x2_t) __builtin_aarch64_sqmovunv2di (__a);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqmovunh_s16 (int16x1_t __a)
+{
+  return (int8x1_t) __builtin_aarch64_sqmovunhi (__a);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqmovuns_s32 (int32x1_t __a)
+{
+  return (int16x1_t) __builtin_aarch64_sqmovunsi (__a);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqmovund_s64 (int64x1_t __a)
+{
+  return (int32x1_t) __builtin_aarch64_sqmovundi (__a);
+}
+
+/* vqneg */
+
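+/* vqneg is a saturating negation: the most negative input clamps to the
+   type maximum instead of wrapping, e.g. vqnegb_s8 (-128) yields 127.  */
+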
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqnegq_s64 (int64x2_t __a)
+{
+  return (int64x2_t) __builtin_aarch64_sqnegv2di (__a);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqnegb_s8 (int8x1_t __a)
+{
+  return (int8x1_t) __builtin_aarch64_sqnegqi (__a);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqnegh_s16 (int16x1_t __a)
+{
+  return (int16x1_t) __builtin_aarch64_sqneghi (__a);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqnegs_s32 (int32x1_t __a)
+{
+  return (int32x1_t) __builtin_aarch64_sqnegsi (__a);
+}
+
+/* vqrdmulh */
+
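+/* vqrdmulh is the rounding form of vqdmulh: for 16-bit lanes the result is
+   roughly saturate ((2 * a * b + 0x8000) >> 16).  */
+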
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+  int16x8_t __tmp = vcombine_s16 (__b, vcreate_s16 (INT64_C (0)));
+  return __builtin_aarch64_sqrdmulh_lanev4hi (__a, __tmp, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+  int32x4_t __tmp = vcombine_s32 (__b, vcreate_s32 (INT64_C (0)));
+  return __builtin_aarch64_sqrdmulh_lanev2si (__a, __tmp, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
+{
+  int16x8_t __tmp = vcombine_s16 (__b, vcreate_s16 (INT64_C (0)));
+  return __builtin_aarch64_sqrdmulh_lanev8hi (__a, __tmp, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
+{
+  int32x4_t __tmp = vcombine_s32 (__b, vcreate_s32 (INT64_C (0)));
+  return __builtin_aarch64_sqrdmulh_lanev4si (__a, __tmp, __c);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqrdmulhh_s16 (int16x1_t __a, int16x1_t __b)
+{
+  return (int16x1_t) __builtin_aarch64_sqrdmulhhi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqrdmulhh_lane_s16 (int16x1_t __a, int16x8_t __b, const int __c)
+{
+  return __builtin_aarch64_sqrdmulh_lanehi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqrdmulhs_s32 (int32x1_t __a, int32x1_t __b)
+{
+  return (int32x1_t) __builtin_aarch64_sqrdmulhsi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqrdmulhs_lane_s32 (int32x1_t __a, int32x4_t __b, const int __c)
+{
+  return __builtin_aarch64_sqrdmulh_lanesi (__a, __b, __c);
+}
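
A sketch of the Q15 fixed-point use case for vqrdmulhh_s16 above (illustrative only, using the scalar types declared in this header):

#include <arm_neon.h>

/* Illustrative Q15 multiply: saturate ((a * b * 2 + 0x8000) >> 16).  */
static inline int16x1_t
q15_mul (int16x1_t a, int16x1_t b)
{
  return vqrdmulhh_s16 (a, b);
}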
+
+/* vqrshl */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqrshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return __builtin_aarch64_sqrshlv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return __builtin_aarch64_sqrshlv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return __builtin_aarch64_sqrshlv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqrshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return __builtin_aarch64_sqrshldi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqrshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_uqrshlv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqrshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_uqrshlv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqrshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_uqrshlv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqrshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_uqrshldi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqrshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return __builtin_aarch64_sqrshlv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return __builtin_aarch64_sqrshlv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return __builtin_aarch64_sqrshlv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqrshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return __builtin_aarch64_sqrshlv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqrshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_uqrshlv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqrshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_uqrshlv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqrshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_uqrshlv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqrshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_uqrshlv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqrshlb_s8 (int8x1_t __a, int8x1_t __b)
+{
+  return __builtin_aarch64_sqrshlqi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqrshlh_s16 (int16x1_t __a, int16x1_t __b)
+{
+  return __builtin_aarch64_sqrshlhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqrshls_s32 (int32x1_t __a, int32x1_t __b)
+{
+  return __builtin_aarch64_sqrshlsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqrshld_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return __builtin_aarch64_sqrshldi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqrshlb_u8 (uint8x1_t __a, uint8x1_t __b)
+{
+  return (uint8x1_t) __builtin_aarch64_uqrshlqi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqrshlh_u16 (uint16x1_t __a, uint16x1_t __b)
+{
+  return (uint16x1_t) __builtin_aarch64_uqrshlhi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqrshls_u32 (uint32x1_t __a, uint32x1_t __b)
+{
+  return (uint32x1_t) __builtin_aarch64_uqrshlsi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqrshld_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_uqrshldi (__a, __b);
+}
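
A usage sketch for the register-controlled shifts above (illustrative; vdup_n_s16 is assumed to be provided elsewhere in this header): a negative per-lane count turns vqrshl into a rounded, saturating right shift.

#include <arm_neon.h>

/* Illustrative helper: rounded right shift of each 16-bit lane by 4,
   expressed as a saturating left shift by -4.  */
static inline int16x4_t
rshift4_rounded (int16x4_t x)
{
  return vqrshl_s16 (x, vdup_n_s16 (-4));
}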
+
+/* vqrshrn */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqrshrn_n_s16 (int16x8_t __a, const int __b)
+{
+  return (int8x8_t) __builtin_aarch64_sqrshrn_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrshrn_n_s32 (int32x4_t __a, const int __b)
+{
+  return (int16x4_t) __builtin_aarch64_sqrshrn_nv4si (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrshrn_n_s64 (int64x2_t __a, const int __b)
+{
+  return (int32x2_t) __builtin_aarch64_sqrshrn_nv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqrshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+  return (uint8x8_t) __builtin_aarch64_uqrshrn_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqrshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+  return (uint16x4_t) __builtin_aarch64_uqrshrn_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqrshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+  return (uint32x2_t) __builtin_aarch64_uqrshrn_nv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqrshrnh_n_s16 (int16x1_t __a, const int __b)
+{
+  return (int8x1_t) __builtin_aarch64_sqrshrn_nhi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqrshrns_n_s32 (int32x1_t __a, const int __b)
+{
+  return (int16x1_t) __builtin_aarch64_sqrshrn_nsi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqrshrnd_n_s64 (int64x1_t __a, const int __b)
+{
+  return (int32x1_t) __builtin_aarch64_sqrshrn_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqrshrnh_n_u16 (uint16x1_t __a, const int __b)
+{
+  return (uint8x1_t) __builtin_aarch64_uqrshrn_nhi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqrshrns_n_u32 (uint32x1_t __a, const int __b)
+{
+  return (uint16x1_t) __builtin_aarch64_uqrshrn_nsi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqrshrnd_n_u64 (uint64x1_t __a, const int __b)
+{
+  return (uint32x1_t) __builtin_aarch64_uqrshrn_ndi (__a, __b);
+}
+
+/* vqrshrun */
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqrshrun_n_s16 (int16x8_t __a, const int __b)
+{
+  return (uint8x8_t) __builtin_aarch64_sqrshrun_nv8hi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqrshrun_n_s32 (int32x4_t __a, const int __b)
+{
+  return (uint16x4_t) __builtin_aarch64_sqrshrun_nv4si (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqrshrun_n_s64 (int64x2_t __a, const int __b)
+{
+  return (uint32x2_t) __builtin_aarch64_sqrshrun_nv2di (__a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqrshrunh_n_s16 (int16x1_t __a, const int __b)
+{
+  return (int8x1_t) __builtin_aarch64_sqrshrun_nhi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqrshruns_n_s32 (int32x1_t __a, const int __b)
+{
+  return (int16x1_t) __builtin_aarch64_sqrshrun_nsi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqrshrund_n_s64 (int64x1_t __a, const int __b)
+{
+  return (int32x1_t) __builtin_aarch64_sqrshrun_ndi (__a, __b);
+}
+
+/* vqshl */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return __builtin_aarch64_sqshlv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return __builtin_aarch64_sqshlv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return __builtin_aarch64_sqshlv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return __builtin_aarch64_sqshldi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_uqshlv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_uqshlv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_uqshlv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_uqshldi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return __builtin_aarch64_sqshlv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return __builtin_aarch64_sqshlv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return __builtin_aarch64_sqshlv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return __builtin_aarch64_sqshlv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_uqshlv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_uqshlv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_uqshlv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_uqshlv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqshlb_s8 (int8x1_t __a, int8x1_t __b)
+{
+  return __builtin_aarch64_sqshlqi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqshlh_s16 (int16x1_t __a, int16x1_t __b)
+{
+  return __builtin_aarch64_sqshlhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqshls_s32 (int32x1_t __a, int32x1_t __b)
+{
+  return __builtin_aarch64_sqshlsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshld_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return __builtin_aarch64_sqshldi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqshlb_u8 (uint8x1_t __a, uint8x1_t __b)
+{
+  return (uint8x1_t) __builtin_aarch64_uqshlqi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqshlh_u16 (uint16x1_t __a, uint16x1_t __b)
+{
+  return (uint16x1_t) __builtin_aarch64_uqshlhi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqshls_u32 (uint32x1_t __a, uint32x1_t __b)
+{
+  return (uint32x1_t) __builtin_aarch64_uqshlsi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshld_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_uqshldi (__a, __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqshl_n_s8 (int8x8_t __a, const int __b)
+{
+  return (int8x8_t) __builtin_aarch64_sqshl_nv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqshl_n_s16 (int16x4_t __a, const int __b)
+{
+  return (int16x4_t) __builtin_aarch64_sqshl_nv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqshl_n_s32 (int32x2_t __a, const int __b)
+{
+  return (int32x2_t) __builtin_aarch64_sqshl_nv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshl_n_s64 (int64x1_t __a, const int __b)
+{
+  return (int64x1_t) __builtin_aarch64_sqshl_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshl_n_u8 (uint8x8_t __a, const int __b)
+{
+  return (uint8x8_t) __builtin_aarch64_uqshl_nv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshl_n_u16 (uint16x4_t __a, const int __b)
+{
+  return (uint16x4_t) __builtin_aarch64_uqshl_nv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshl_n_u32 (uint32x2_t __a, const int __b)
+{
+  return (uint32x2_t) __builtin_aarch64_uqshl_nv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshl_n_u64 (uint64x1_t __a, const int __b)
+{
+  return (uint64x1_t) __builtin_aarch64_uqshl_ndi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqshlq_n_s8 (int8x16_t __a, const int __b)
+{
+  return (int8x16_t) __builtin_aarch64_sqshl_nv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqshlq_n_s16 (int16x8_t __a, const int __b)
+{
+  return (int16x8_t) __builtin_aarch64_sqshl_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqshlq_n_s32 (int32x4_t __a, const int __b)
+{
+  return (int32x4_t) __builtin_aarch64_sqshl_nv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqshlq_n_s64 (int64x2_t __a, const int __b)
+{
+  return (int64x2_t) __builtin_aarch64_sqshl_nv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqshlq_n_u8 (uint8x16_t __a, const int __b)
+{
+  return (uint8x16_t) __builtin_aarch64_uqshl_nv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqshlq_n_u16 (uint16x8_t __a, const int __b)
+{
+  return (uint16x8_t) __builtin_aarch64_uqshl_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqshlq_n_u32 (uint32x4_t __a, const int __b)
+{
+  return (uint32x4_t) __builtin_aarch64_uqshl_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqshlq_n_u64 (uint64x2_t __a, const int __b)
+{
+  return (uint64x2_t) __builtin_aarch64_uqshl_nv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqshlb_n_s8 (int8x1_t __a, const int __b)
+{
+  return (int8x1_t) __builtin_aarch64_sqshl_nqi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqshlh_n_s16 (int16x1_t __a, const int __b)
+{
+  return (int16x1_t) __builtin_aarch64_sqshl_nhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqshls_n_s32 (int32x1_t __a, const int __b)
+{
+  return (int32x1_t) __builtin_aarch64_sqshl_nsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshld_n_s64 (int64x1_t __a, const int __b)
+{
+  return (int64x1_t) __builtin_aarch64_sqshl_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqshlb_n_u8 (uint8x1_t __a, const int __b)
+{
+  return (uint8x1_t) __builtin_aarch64_uqshl_nqi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqshlh_n_u16 (uint16x1_t __a, const int __b)
+{
+  return (uint16x1_t) __builtin_aarch64_uqshl_nhi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqshls_n_u32 (uint32x1_t __a, const int __b)
+{
+  return (uint32x1_t) __builtin_aarch64_uqshl_nsi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshld_n_u64 (uint64x1_t __a, const int __b)
+{
+  return (uint64x1_t) __builtin_aarch64_uqshl_ndi (__a, __b);
+}
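
An illustrative sketch of the immediate form above: a saturating left shift scales each lane and clamps on overflow.

#include <arm_neon.h>

/* Illustrative helper: multiply each 16-bit lane by 8, clamping to
   [INT16_MIN, INT16_MAX] on overflow.  */
static inline int16x4_t
scale_by_8 (int16x4_t x)
{
  return vqshl_n_s16 (x, 3);
}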
+
+/* vqshlu */
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshlu_n_s8 (int8x8_t __a, const int __b)
+{
+  return (uint8x8_t) __builtin_aarch64_sqshlu_nv8qi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshlu_n_s16 (int16x4_t __a, const int __b)
+{
+  return (uint16x4_t) __builtin_aarch64_sqshlu_nv4hi (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshlu_n_s32 (int32x2_t __a, const int __b)
+{
+  return (uint32x2_t) __builtin_aarch64_sqshlu_nv2si (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshlu_n_s64 (int64x1_t __a, const int __b)
+{
+  return (uint64x1_t) __builtin_aarch64_sqshlu_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqshluq_n_s8 (int8x16_t __a, const int __b)
+{
+  return (uint8x16_t) __builtin_aarch64_sqshlu_nv16qi (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqshluq_n_s16 (int16x8_t __a, const int __b)
+{
+  return (uint16x8_t) __builtin_aarch64_sqshlu_nv8hi (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqshluq_n_s32 (int32x4_t __a, const int __b)
+{
+  return (uint32x4_t) __builtin_aarch64_sqshlu_nv4si (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqshluq_n_s64 (int64x2_t __a, const int __b)
+{
+  return (uint64x2_t) __builtin_aarch64_sqshlu_nv2di (__a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqshlub_n_s8 (int8x1_t __a, const int __b)
+{
+  return (int8x1_t) __builtin_aarch64_sqshlu_nqi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqshluh_n_s16 (int16x1_t __a, const int __b)
+{
+  return (int16x1_t) __builtin_aarch64_sqshlu_nhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqshlus_n_s32 (int32x1_t __a, const int __b)
+{
+  return (int32x1_t) __builtin_aarch64_sqshlu_nsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshlud_n_s64 (int64x1_t __a, const int __b)
+{
+  return (int64x1_t) __builtin_aarch64_sqshlu_ndi (__a, __b);
+}
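
A sketch (illustrative only) of vqshlu_n, which shifts signed input left and saturates it to the unsigned range:

#include <arm_neon.h>

/* Illustrative helper: shift signed lanes left by 2 and clamp to
   [0, 255]; negative inputs become 0.  */
static inline uint8x8_t
to_u8_scaled (int8x8_t x)
{
  return vqshlu_n_s8 (x, 2);
}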
+
+/* vqshrn */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqshrn_n_s16 (int16x8_t __a, const int __b)
+{
+  return (int8x8_t) __builtin_aarch64_sqshrn_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqshrn_n_s32 (int32x4_t __a, const int __b)
+{
+  return (int16x4_t) __builtin_aarch64_sqshrn_nv4si (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqshrn_n_s64 (int64x2_t __a, const int __b)
+{
+  return (int32x2_t) __builtin_aarch64_sqshrn_nv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+  return (uint8x8_t) __builtin_aarch64_uqshrn_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+  return (uint16x4_t) __builtin_aarch64_uqshrn_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+  return (uint32x2_t) __builtin_aarch64_uqshrn_nv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqshrnh_n_s16 (int16x1_t __a, const int __b)
+{
+  return (int8x1_t) __builtin_aarch64_sqshrn_nhi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqshrns_n_s32 (int32x1_t __a, const int __b)
+{
+  return (int16x1_t) __builtin_aarch64_sqshrn_nsi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqshrnd_n_s64 (int64x1_t __a, const int __b)
+{
+  return (int32x1_t) __builtin_aarch64_sqshrn_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqshrnh_n_u16 (uint16x1_t __a, const int __b)
+{
+  return (uint8x1_t) __builtin_aarch64_uqshrn_nhi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqshrns_n_u32 (uint32x1_t __a, const int __b)
+{
+  return (uint16x1_t) __builtin_aarch64_uqshrn_nsi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqshrnd_n_u64 (uint64x1_t __a, const int __b)
+{
+  return (uint32x1_t) __builtin_aarch64_uqshrn_ndi (__a, __b);
+}
+
+/* vqshrun */
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshrun_n_s16 (int16x8_t __a, const int __b)
+{
+  return (uint8x8_t) __builtin_aarch64_sqshrun_nv8hi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshrun_n_s32 (int32x4_t __a, const int __b)
+{
+  return (uint16x4_t) __builtin_aarch64_sqshrun_nv4si (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshrun_n_s64 (int64x2_t __a, const int __b)
+{
+  return (uint32x2_t) __builtin_aarch64_sqshrun_nv2di (__a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqshrunh_n_s16 (int16x1_t __a, const int __b)
+{
+  return (int8x1_t) __builtin_aarch64_sqshrun_nhi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqshruns_n_s32 (int32x1_t __a, const int __b)
+{
+  return (int16x1_t) __builtin_aarch64_sqshrun_nsi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqshrund_n_s64 (int64x1_t __a, const int __b)
+{
+  return (int32x1_t) __builtin_aarch64_sqshrun_ndi (__a, __b);
+}
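
An illustrative sketch of the narrowing shifts above, e.g. converting a widened filter accumulator back to 8-bit pixels:

#include <arm_neon.h>

/* Illustrative helper: shift 16-bit accumulators right by 6 and
   saturate to the unsigned 8-bit range.  */
static inline uint8x8_t
accum_to_pixels (int16x8_t acc)
{
  return vqshrun_n_s16 (acc, 6);
}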
+
+/* vqsub */
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqsubb_s8 (int8x1_t __a, int8x1_t __b)
+{
+  return (int8x1_t) __builtin_aarch64_sqsubqi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqsubh_s16 (int16x1_t __a, int16x1_t __b)
+{
+  return (int16x1_t) __builtin_aarch64_sqsubhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqsubs_s32 (int32x1_t __a, int32x1_t __b)
+{
+  return (int32x1_t) __builtin_aarch64_sqsubsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqsubd_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (int64x1_t) __builtin_aarch64_sqsubdi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqsubb_u8 (uint8x1_t __a, uint8x1_t __b)
+{
+  return (uint8x1_t) __builtin_aarch64_uqsubqi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqsubh_u16 (uint16x1_t __a, uint16x1_t __b)
+{
+  return (uint16x1_t) __builtin_aarch64_uqsubhi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqsubs_u32 (uint32x1_t __a, uint32x1_t __b)
+{
+  return (uint32x1_t) __builtin_aarch64_uqsubsi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqsubd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_uqsubdi (__a, __b);
+}
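
A sketch (illustrative only) of the scalar saturating subtract above:

#include <arm_neon.h>

/* Illustrative helper: unsigned subtract that clamps at 0 instead of
   wrapping around.  */
static inline uint8x1_t
sub_clamped (uint8x1_t a, uint8x1_t b)
{
  return vqsubb_u8 (a, b);
}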
+
+/* vrshl */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return (int8x8_t) __builtin_aarch64_srshlv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return (int16x4_t) __builtin_aarch64_srshlv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return (int32x2_t) __builtin_aarch64_srshlv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (int64x1_t) __builtin_aarch64_srshldi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_urshlv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_urshlv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_urshlv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_urshldi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return (int8x16_t) __builtin_aarch64_srshlv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return (int16x8_t) __builtin_aarch64_srshlv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return (int32x4_t) __builtin_aarch64_srshlv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vrshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return (int64x2_t) __builtin_aarch64_srshlv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_urshlv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_urshlv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_urshlv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vrshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_urshlv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrshld_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (int64x1_t) __builtin_aarch64_srshldi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrshld_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_urshldi (__a, __b);
+}
+
+/* vrshr */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrshr_n_s8 (int8x8_t __a, const int __b)
+{
+  return (int8x8_t) __builtin_aarch64_srshr_nv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrshr_n_s16 (int16x4_t __a, const int __b)
+{
+  return (int16x4_t) __builtin_aarch64_srshr_nv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrshr_n_s32 (int32x2_t __a, const int __b)
+{
+  return (int32x2_t) __builtin_aarch64_srshr_nv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrshr_n_s64 (int64x1_t __a, const int __b)
+{
+  return (int64x1_t) __builtin_aarch64_srshr_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrshr_n_u8 (uint8x8_t __a, const int __b)
+{
+  return (uint8x8_t) __builtin_aarch64_urshr_nv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrshr_n_u16 (uint16x4_t __a, const int __b)
+{
+  return (uint16x4_t) __builtin_aarch64_urshr_nv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrshr_n_u32 (uint32x2_t __a, const int __b)
+{
+  return (uint32x2_t) __builtin_aarch64_urshr_nv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrshr_n_u64 (uint64x1_t __a, const int __b)
+{
+  return (uint64x1_t) __builtin_aarch64_urshr_ndi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrshrq_n_s8 (int8x16_t __a, const int __b)
+{
+  return (int8x16_t) __builtin_aarch64_srshr_nv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrshrq_n_s16 (int16x8_t __a, const int __b)
+{
+  return (int16x8_t) __builtin_aarch64_srshr_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrshrq_n_s32 (int32x4_t __a, const int __b)
+{
+  return (int32x4_t) __builtin_aarch64_srshr_nv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vrshrq_n_s64 (int64x2_t __a, const int __b)
+{
+  return (int64x2_t) __builtin_aarch64_srshr_nv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrshrq_n_u8 (uint8x16_t __a, const int __b)
+{
+  return (uint8x16_t) __builtin_aarch64_urshr_nv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrshrq_n_u16 (uint16x8_t __a, const int __b)
+{
+  return (uint16x8_t) __builtin_aarch64_urshr_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrshrq_n_u32 (uint32x4_t __a, const int __b)
+{
+  return (uint32x4_t) __builtin_aarch64_urshr_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vrshrq_n_u64 (uint64x2_t __a, const int __b)
+{
+  return (uint64x2_t) __builtin_aarch64_urshr_nv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrshrd_n_s64 (int64x1_t __a, const int __b)
+{
+  return (int64x1_t) __builtin_aarch64_srshr_ndi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrshrd_n_u64 (uint64x1_t __a, const int __b)
+{
+  return (uint64x1_t) __builtin_aarch64_urshr_ndi (__a, __b);
+}
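
An illustrative sketch of the rounding right shift above, used here as a round-to-nearest divide by a power of two:

#include <arm_neon.h>

/* Illustrative helper: divide each unsigned 16-bit lane by 16,
   rounding to nearest (halves round up) rather than truncating.  */
static inline uint16x4_t
div16_rounded (uint16x4_t x)
{
  return vrshr_n_u16 (x, 4);
}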
+
+/* vrsra */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+  return (int8x8_t) __builtin_aarch64_srsra_nv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+  return (int16x4_t) __builtin_aarch64_srsra_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+  return (int32x2_t) __builtin_aarch64_srsra_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+  return (int64x1_t) __builtin_aarch64_srsra_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+  return (uint8x8_t) __builtin_aarch64_ursra_nv8qi ((int8x8_t) __a,
+                                                   (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+  return (uint16x4_t) __builtin_aarch64_ursra_nv4hi ((int16x4_t) __a,
+                                                    (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+  return (uint32x2_t) __builtin_aarch64_ursra_nv2si ((int32x2_t) __a,
+                                                    (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+  return (uint64x1_t) __builtin_aarch64_ursra_ndi ((int64x1_t) __a,
+                                                  (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+  return (int8x16_t) __builtin_aarch64_srsra_nv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+  return (int16x8_t) __builtin_aarch64_srsra_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+  return (int32x4_t) __builtin_aarch64_srsra_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vrsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+  return (int64x2_t) __builtin_aarch64_srsra_nv2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+  return (uint8x16_t) __builtin_aarch64_ursra_nv16qi ((int8x16_t) __a,
+                                                     (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+  return (uint16x8_t) __builtin_aarch64_ursra_nv8hi ((int16x8_t) __a,
+                                                    (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+  return (uint32x4_t) __builtin_aarch64_ursra_nv4si ((int32x4_t) __a,
+                                                    (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vrsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+  return (uint64x2_t) __builtin_aarch64_ursra_nv2di ((int64x2_t) __a,
+                                                    (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrsrad_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+  return (int64x1_t) __builtin_aarch64_srsra_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrsrad_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+  return (uint64x1_t) __builtin_aarch64_ursra_ndi (__a, __b, __c);
+}
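
A sketch (illustrative only) of the rounding shift-and-accumulate above:

#include <arm_neon.h>

/* Illustrative helper: per lane, acc + ((x + 4) >> 3), i.e. accumulate
   x scaled down by 8 with rounding.  */
static inline int32x4_t
acc_rshift3 (int32x4_t acc, int32x4_t x)
{
  return vrsraq_n_s32 (acc, x, 3);
}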
+
+/* vshl */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshl_n_s8 (int8x8_t __a, const int __b)
+{
+  return (int8x8_t) __builtin_aarch64_sshl_nv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshl_n_s16 (int16x4_t __a, const int __b)
+{
+  return (int16x4_t) __builtin_aarch64_sshl_nv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshl_n_s32 (int32x2_t __a, const int __b)
+{
+  return (int32x2_t) __builtin_aarch64_sshl_nv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshl_n_s64 (int64x1_t __a, const int __b)
+{
+  return (int64x1_t) __builtin_aarch64_sshl_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshl_n_u8 (uint8x8_t __a, const int __b)
+{
+  return (uint8x8_t) __builtin_aarch64_ushl_nv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshl_n_u16 (uint16x4_t __a, const int __b)
+{
+  return (uint16x4_t) __builtin_aarch64_ushl_nv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshl_n_u32 (uint32x2_t __a, const int __b)
+{
+  return (uint32x2_t) __builtin_aarch64_ushl_nv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshl_n_u64 (uint64x1_t __a, const int __b)
+{
+  return (uint64x1_t) __builtin_aarch64_ushl_ndi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vshlq_n_s8 (int8x16_t __a, const int __b)
+{
+  return (int8x16_t) __builtin_aarch64_sshl_nv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshlq_n_s16 (int16x8_t __a, const int __b)
+{
+  return (int16x8_t) __builtin_aarch64_sshl_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshlq_n_s32 (int32x4_t __a, const int __b)
+{
+  return (int32x4_t) __builtin_aarch64_sshl_nv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshlq_n_s64 (int64x2_t __a, const int __b)
+{
+  return (int64x2_t) __builtin_aarch64_sshl_nv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vshlq_n_u8 (uint8x16_t __a, const int __b)
+{
+  return (uint8x16_t) __builtin_aarch64_ushl_nv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshlq_n_u16 (uint16x8_t __a, const int __b)
+{
+  return (uint16x8_t) __builtin_aarch64_ushl_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshlq_n_u32 (uint32x4_t __a, const int __b)
+{
+  return (uint32x4_t) __builtin_aarch64_ushl_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshlq_n_u64 (uint64x2_t __a, const int __b)
+{
+  return (uint64x2_t) __builtin_aarch64_ushl_nv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshld_n_s64 (int64x1_t __a, const int __b)
+{
+  return (int64x1_t) __builtin_aarch64_sshl_ndi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshld_n_u64 (uint64x1_t __a, const int __b)
+{
+  return (uint64x1_t) __builtin_aarch64_ushl_ndi (__a, __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return (int8x8_t) __builtin_aarch64_sshlv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return (int16x4_t) __builtin_aarch64_sshlv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return (int32x2_t) __builtin_aarch64_sshlv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (int64x1_t) __builtin_aarch64_sshldi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_ushlv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_ushlv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_ushlv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_ushldi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return (int8x16_t) __builtin_aarch64_sshlv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return (int16x8_t) __builtin_aarch64_sshlv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return (int32x4_t) __builtin_aarch64_sshlv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return (int64x2_t) __builtin_aarch64_sshlv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_ushlv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_ushlv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_ushlv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_ushlv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshld_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (int64x1_t) __builtin_aarch64_sshldi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshld_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_ushldi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshll_high_n_s8 (int8x16_t __a, const int __b)
+{
+  return __builtin_aarch64_sshll2_nv16qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshll_high_n_s16 (int16x8_t __a, const int __b)
+{
+  return __builtin_aarch64_sshll2_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshll_high_n_s32 (int32x4_t __a, const int __b)
+{
+  return __builtin_aarch64_sshll2_nv4si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshll_high_n_u8 (uint8x16_t __a, const int __b)
+{
+  return (uint16x8_t) __builtin_aarch64_ushll2_nv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshll_high_n_u16 (uint16x8_t __a, const int __b)
+{
+  return (uint32x4_t) __builtin_aarch64_ushll2_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshll_high_n_u32 (uint32x4_t __a, const int __b)
+{
+  return (uint64x2_t) __builtin_aarch64_ushll2_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshll_n_s8 (int8x8_t __a, const int __b)
+{
+  return __builtin_aarch64_sshll_nv8qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshll_n_s16 (int16x4_t __a, const int __b)
+{
+  return __builtin_aarch64_sshll_nv4hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshll_n_s32 (int32x2_t __a, const int __b)
+{
+  return __builtin_aarch64_sshll_nv2si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshll_n_u8 (uint8x8_t __a, const int __b)
+{
+  return (uint16x8_t) __builtin_aarch64_ushll_nv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshll_n_u16 (uint16x4_t __a, const int __b)
+{
+  return (uint32x4_t) __builtin_aarch64_ushll_nv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshll_n_u32 (uint32x2_t __a, const int __b)
+{
+  return (uint64x2_t) __builtin_aarch64_ushll_nv2si ((int32x2_t) __a, __b);
+}
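
An illustrative sketch of the widening shift above, which widens and scales in a single instruction:

#include <arm_neon.h>

/* Illustrative helper: widen 8-bit lanes to 16 bits and multiply by 16
   (left shift by 4) in one step.  */
static inline uint16x8_t
widen_times_16 (uint8x8_t x)
{
  return vshll_n_u8 (x, 4);
}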
+
+/* vshr */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshr_n_s8 (int8x8_t __a, const int __b)
+{
+  return (int8x8_t) __builtin_aarch64_sshr_nv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshr_n_s16 (int16x4_t __a, const int __b)
+{
+  return (int16x4_t) __builtin_aarch64_sshr_nv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshr_n_s32 (int32x2_t __a, const int __b)
+{
+  return (int32x2_t) __builtin_aarch64_sshr_nv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshr_n_s64 (int64x1_t __a, const int __b)
+{
+  return (int64x1_t) __builtin_aarch64_sshr_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshr_n_u8 (uint8x8_t __a, const int __b)
+{
+  return (uint8x8_t) __builtin_aarch64_ushr_nv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshr_n_u16 (uint16x4_t __a, const int __b)
+{
+  return (uint16x4_t) __builtin_aarch64_ushr_nv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshr_n_u32 (uint32x2_t __a, const int __b)
+{
+  return (uint32x2_t) __builtin_aarch64_ushr_nv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshr_n_u64 (uint64x1_t __a, const int __b)
+{
+  return (uint64x1_t) __builtin_aarch64_ushr_ndi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vshrq_n_s8 (int8x16_t __a, const int __b)
+{
+  return (int8x16_t) __builtin_aarch64_sshr_nv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshrq_n_s16 (int16x8_t __a, const int __b)
+{
+  return (int16x8_t) __builtin_aarch64_sshr_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshrq_n_s32 (int32x4_t __a, const int __b)
+{
+  return (int32x4_t) __builtin_aarch64_sshr_nv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshrq_n_s64 (int64x2_t __a, const int __b)
+{
+  return (int64x2_t) __builtin_aarch64_sshr_nv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vshrq_n_u8 (uint8x16_t __a, const int __b)
+{
+  return (uint8x16_t) __builtin_aarch64_ushr_nv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshrq_n_u16 (uint16x8_t __a, const int __b)
+{
+  return (uint16x8_t) __builtin_aarch64_ushr_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshrq_n_u32 (uint32x4_t __a, const int __b)
+{
+  return (uint32x4_t) __builtin_aarch64_ushr_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshrq_n_u64 (uint64x2_t __a, const int __b)
+{
+  return (uint64x2_t) __builtin_aarch64_ushr_nv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshrd_n_s64 (int64x1_t __a, const int __b)
+{
+  return (int64x1_t) __builtin_aarch64_sshr_ndi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshrd_n_u64 (uint64x1_t __a, const int __b)
+{
+  return (uint64x1_t) __builtin_aarch64_ushr_ndi (__a, __b);
+}
+
+/* vsli */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsli_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+  return (int8x8_t) __builtin_aarch64_ssli_nv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsli_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+  return (int16x4_t) __builtin_aarch64_ssli_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsli_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+  return (int32x2_t) __builtin_aarch64_ssli_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsli_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+  return (int64x1_t) __builtin_aarch64_ssli_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsli_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+  return (uint8x8_t) __builtin_aarch64_usli_nv8qi ((int8x8_t) __a,
+                                                  (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsli_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+  return (uint16x4_t) __builtin_aarch64_usli_nv4hi ((int16x4_t) __a,
+                                                   (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsli_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+  return (uint32x2_t) __builtin_aarch64_usli_nv2si ((int32x2_t) __a,
+                                                   (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsli_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+  return (uint64x1_t) __builtin_aarch64_usli_ndi ((int64x1_t) __a,
+                                                 (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+  return (int8x16_t) __builtin_aarch64_ssli_nv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsliq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+  return (int16x8_t) __builtin_aarch64_ssli_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsliq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+  return (int32x4_t) __builtin_aarch64_ssli_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsliq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+  return (int64x2_t) __builtin_aarch64_ssli_nv2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsliq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+  return (uint8x16_t) __builtin_aarch64_usli_nv16qi ((int8x16_t) __a,
+                                                    (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsliq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+  return (uint16x8_t) __builtin_aarch64_usli_nv8hi ((int16x8_t) __a,
+                                                   (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsliq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+  return (uint32x4_t) __builtin_aarch64_usli_nv4si ((int32x4_t) __a,
+                                                   (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsliq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+  return (uint64x2_t) __builtin_aarch64_usli_nv2di ((int64x2_t) __a,
+                                                   (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vslid_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+  return (int64x1_t) __builtin_aarch64_ssli_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vslid_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+  return (uint64x1_t) __builtin_aarch64_usli_ndi (__a, __b, __c);
+}
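
A sketch (illustrative only) of shift-left-and-insert, which preserves the low bits of the destination operand:

#include <arm_neon.h>

/* Illustrative helper: per lane, (hi << 4) | (lo & 0x0f), packing two
   4-bit fields into one byte.  */
static inline uint8x8_t
pack_nibbles (uint8x8_t lo, uint8x8_t hi)
{
  return vsli_n_u8 (lo, hi, 4);
}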
+
+/* vsqadd */
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsqadd_u8 (uint8x8_t __a, int8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_usqaddv8qi ((int8x8_t) __a,
+                                                  (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsqadd_u16 (uint16x4_t __a, int16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_usqaddv4hi ((int16x4_t) __a,
+                                                   (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsqadd_u32 (uint32x2_t __a, int32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_usqaddv2si ((int32x2_t) __a,
+                                                   (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsqadd_u64 (uint64x1_t __a, int64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_usqadddi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsqaddq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_usqaddv16qi ((int8x16_t) __a,
+                                                    (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsqaddq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_usqaddv8hi ((int16x8_t) __a,
+                                                   (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsqaddq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_usqaddv4si ((int32x4_t) __a,
+                                                   (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsqaddq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_usqaddv2di ((int64x2_t) __a,
+                                                   (int64x2_t) __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vsqaddb_u8 (uint8x1_t __a, int8x1_t __b)
+{
+  return (uint8x1_t) __builtin_aarch64_usqaddqi ((int8x1_t) __a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vsqaddh_u16 (uint16x1_t __a, int16x1_t __b)
+{
+  return (uint16x1_t) __builtin_aarch64_usqaddhi ((int16x1_t) __a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vsqadds_u32 (uint32x1_t __a, int32x1_t __b)
+{
+  return (uint32x1_t) __builtin_aarch64_usqaddsi ((int32x1_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsqaddd_u64 (uint64x1_t __a, int64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_usqadddi ((int64x1_t) __a, __b);
+}
+
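A usage sketch for the vsqadd intrinsics above, which perform an unsigned saturating addition of a signed operand (hypothetical helper name, illustrative only):

  /* Add a signed per-lane delta to unsigned values, saturating to [0, 255].  */
  uint8x16_t
  apply_signed_delta (uint8x16_t __pix, int8x16_t __delta)
  {
    return vsqaddq_u8 (__pix, __delta);
  }
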
+/* vsqrt */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vsqrt_f32 (float32x2_t a)
+{
+  return __builtin_aarch64_sqrtv2sf (a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vsqrtq_f32 (float32x4_t a)
+{
+  return __builtin_aarch64_sqrtv4sf (a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vsqrtq_f64 (float64x2_t a)
+{
+  return __builtin_aarch64_sqrtv2df (a);
+}
+
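An illustrative use of the vsqrt intrinsics above (hypothetical helper, not part of the patch), combined with the plain arithmetic intrinsics from this header:

  /* Per-lane length of (x, y) pairs: sqrt (x*x + y*y).  */
  float32x4_t
  norm2_f32 (float32x4_t __x, float32x4_t __y)
  {
    return vsqrtq_f32 (vaddq_f32 (vmulq_f32 (__x, __x),
                                  vmulq_f32 (__y, __y)));
  }
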
+/* vsra */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+  return (int8x8_t) __builtin_aarch64_ssra_nv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+  return (int16x4_t) __builtin_aarch64_ssra_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+  return (int32x2_t) __builtin_aarch64_ssra_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+  return (int64x1_t) __builtin_aarch64_ssra_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+  return (uint8x8_t) __builtin_aarch64_usra_nv8qi ((int8x8_t) __a,
+                                                  (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+  return (uint16x4_t) __builtin_aarch64_usra_nv4hi ((int16x4_t) __a,
+                                                   (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+  return (uint32x2_t) __builtin_aarch64_usra_nv2si ((int32x2_t) __a,
+                                                   (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+  return (uint64x1_t) __builtin_aarch64_usra_ndi ((int64x1_t) __a,
+                                                 (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+  return (int8x16_t) __builtin_aarch64_ssra_nv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+  return (int16x8_t) __builtin_aarch64_ssra_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+  return (int32x4_t) __builtin_aarch64_ssra_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+  return (int64x2_t) __builtin_aarch64_ssra_nv2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+  return (uint8x16_t) __builtin_aarch64_usra_nv16qi ((int8x16_t) __a,
+                                                    (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+  return (uint16x8_t) __builtin_aarch64_usra_nv8hi ((int16x8_t) __a,
+                                                   (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+  return (uint32x4_t) __builtin_aarch64_usra_nv4si ((int32x4_t) __a,
+                                                   (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+  return (uint64x2_t) __builtin_aarch64_usra_nv2di ((int64x2_t) __a,
+                                                   (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsrad_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+  return (int64x1_t) __builtin_aarch64_ssra_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsrad_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+  return (uint64x1_t) __builtin_aarch64_usra_ndi (__a, __b, __c);
+}
+
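A usage sketch for the vsra (shift right and accumulate) intrinsics above; accumulate_quarter is a hypothetical name:

  /* Returns __acc + (__x >> 2), i.e. accumulates a quarter of each lane.  */
  uint16x8_t
  accumulate_quarter (uint16x8_t __acc, uint16x8_t __x)
  {
    return vsraq_n_u16 (__acc, __x, 2);
  }
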
+/* vsri */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsri_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+  return (int8x8_t) __builtin_aarch64_ssri_nv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsri_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+  return (int16x4_t) __builtin_aarch64_ssri_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsri_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+  return (int32x2_t) __builtin_aarch64_ssri_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsri_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+  return (int64x1_t) __builtin_aarch64_ssri_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsri_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+  return (uint8x8_t) __builtin_aarch64_usri_nv8qi ((int8x8_t) __a,
+                                                  (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsri_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+  return (uint16x4_t) __builtin_aarch64_usri_nv4hi ((int16x4_t) __a,
+                                                   (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsri_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+  return (uint32x2_t) __builtin_aarch64_usri_nv2si ((int32x2_t) __a,
+                                                   (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsri_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+  return (uint64x1_t) __builtin_aarch64_usri_ndi ((int64x1_t) __a,
+                                                 (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+  return (int8x16_t) __builtin_aarch64_ssri_nv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsriq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+  return (int16x8_t) __builtin_aarch64_ssri_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsriq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+  return (int32x4_t) __builtin_aarch64_ssri_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsriq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+  return (int64x2_t) __builtin_aarch64_ssri_nv2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsriq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+  return (uint8x16_t) __builtin_aarch64_usri_nv16qi ((int8x16_t) __a,
+                                                    (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsriq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+  return (uint16x8_t) __builtin_aarch64_usri_nv8hi ((int16x8_t) __a,
+                                                   (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsriq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+  return (uint32x4_t) __builtin_aarch64_usri_nv4si ((int32x4_t) __a,
+                                                   (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsriq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+  return (uint64x2_t) __builtin_aarch64_usri_nv2di ((int64x2_t) __a,
+                                                   (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsrid_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+  return (int64x1_t) __builtin_aarch64_ssri_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsrid_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+  return (uint64x1_t) __builtin_aarch64_usri_ndi (__a, __b, __c);
+}
+
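A usage sketch for the vsri (shift right and insert) intrinsics above; merge_nibbles is a hypothetical helper:

  /* Each result lane is (__hi & 0xf0) | (__lo >> 4).  */
  uint8x16_t
  merge_nibbles (uint8x16_t __hi, uint8x16_t __lo)
  {
    return vsriq_n_u8 (__hi, __lo, 4);
  }
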
+/* vstn */
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s64 (int64_t * __a, int64x1x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  int64x2x2_t temp;
+  temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (INT64_C (0)));
+  temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (INT64_C (0)));
+  __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[1], 1);
+  __builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u64 (uint64_t * __a, uint64x1x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  uint64x2x2_t temp;
+  temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (UINT64_C (0)));
+  temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[1], 1);
+  __builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_f64 (float64_t * __a, float64x1x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  float64x2x2_t temp;
+  temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (UINT64_C (0)));
+  temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) temp.val[1], 1);
+  __builtin_aarch64_st2df ((__builtin_aarch64_simd_df *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s8 (int8_t * __a, int8x8x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  int8x16x2_t temp;
+  temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (INT64_C (0)));
+  temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (INT64_C (0)));
+  __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1);
+  __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_p8 (poly8_t * __a, poly8x8x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  poly8x16x2_t temp;
+  temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (UINT64_C (0)));
+  temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1);
+  __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s16 (int16_t * __a, int16x4x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  int16x8x2_t temp;
+  temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (INT64_C (0)));
+  temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (INT64_C (0)));
+  __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1);
+  __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_p16 (poly16_t * __a, poly16x4x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  poly16x8x2_t temp;
+  temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (UINT64_C (0)));
+  temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1);
+  __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s32 (int32_t * __a, int32x2x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  int32x4x2_t temp;
+  temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (INT64_C (0)));
+  temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (INT64_C (0)));
+  __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[1], 1);
+  __builtin_aarch64_st2v2si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u8 (uint8_t * __a, uint8x8x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  uint8x16x2_t temp;
+  temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (UINT64_C (0)));
+  temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1);
+  __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u16 (uint16_t * __a, uint16x4x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  uint16x8x2_t temp;
+  temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (UINT64_C (0)));
+  temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1);
+  __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u32 (uint32_t * __a, uint32x2x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  uint32x4x2_t temp;
+  temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (UINT64_C (0)));
+  temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[1], 1);
+  __builtin_aarch64_st2v2si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_f32 (float32_t * __a, float32x2x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  float32x4x2_t temp;
+  temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (UINT64_C (0)));
+  temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) temp.val[1], 1);
+  __builtin_aarch64_st2v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s8 (int8_t * __a, int8x16x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[1], 1);
+  __builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_p8 (poly8_t * __a, poly8x16x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[1], 1);
+  __builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s16 (int16_t * __a, int16x8x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[1], 1);
+  __builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_p16 (poly16_t * __a, poly16x8x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[1], 1);
+  __builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s32 (int32_t * __a, int32x4x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) val.val[1], 1);
+  __builtin_aarch64_st2v4si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s64 (int64_t * __a, int64x2x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) val.val[1], 1);
+  __builtin_aarch64_st2v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u8 (uint8_t * __a, uint8x16x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[1], 1);
+  __builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u16 (uint16_t * __a, uint16x8x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[1], 1);
+  __builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u32 (uint32_t * __a, uint32x4x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) val.val[1], 1);
+  __builtin_aarch64_st2v4si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u64 (uint64_t * __a, uint64x2x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) val.val[1], 1);
+  __builtin_aarch64_st2v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_f32 (float32_t * __a, float32x4x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) val.val[1], 1);
+  __builtin_aarch64_st2v4sf ((__builtin_aarch64_simd_sf *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_f64 (float64_t * __a, float64x2x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) val.val[1], 1);
+  __builtin_aarch64_st2v2df ((__builtin_aarch64_simd_df *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s64 (int64_t * __a, int64x1x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  int64x2x3_t temp;
+  temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (INT64_C (0)));
+  temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (INT64_C (0)));
+  temp.val[2] = vcombine_s64 (val.val[2], vcreate_s64 (INT64_C (0)));
+  __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[2], 2);
+  __builtin_aarch64_st3di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u64 (uint64_t * __a, uint64x1x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  uint64x2x3_t temp;
+  temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (UINT64_C (0)));
+  temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (UINT64_C (0)));
+  temp.val[2] = vcombine_u64 (val.val[2], vcreate_u64 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[2], 2);
+  __builtin_aarch64_st3di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_f64 (float64_t * __a, float64x1x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  float64x2x3_t temp;
+  temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (UINT64_C (0)));
+  temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (UINT64_C (0)));
+  temp.val[2] = vcombine_f64 (val.val[2], vcreate_f64 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[2], 2);
+  __builtin_aarch64_st3df ((__builtin_aarch64_simd_df *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s8 (int8_t * __a, int8x8x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  int8x16x3_t temp;
+  temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (INT64_C (0)));
+  temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (INT64_C (0)));
+  temp.val[2] = vcombine_s8 (val.val[2], vcreate_s8 (INT64_C (0)));
+  __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2);
+  __builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_p8 (poly8_t * __a, poly8x8x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  poly8x16x3_t temp;
+  temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (UINT64_C (0)));
+  temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (UINT64_C (0)));
+  temp.val[2] = vcombine_p8 (val.val[2], vcreate_p8 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2);
+  __builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s16 (int16_t * __a, int16x4x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  int16x8x3_t temp;
+  temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (INT64_C (0)));
+  temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (INT64_C (0)));
+  temp.val[2] = vcombine_s16 (val.val[2], vcreate_s16 (INT64_C (0)));
+  __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2);
+  __builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_p16 (poly16_t * __a, poly16x4x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  poly16x8x3_t temp;
+  temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (UINT64_C (0)));
+  temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (UINT64_C (0)));
+  temp.val[2] = vcombine_p16 (val.val[2], vcreate_p16 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2);
+  __builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s32 (int32_t * __a, int32x2x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  int32x4x3_t temp;
+  temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (INT64_C (0)));
+  temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (INT64_C (0)));
+  temp.val[2] = vcombine_s32 (val.val[2], vcreate_s32 (INT64_C (0)));
+  __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[2], 2);
+  __builtin_aarch64_st3v2si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u8 (uint8_t * __a, uint8x8x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  uint8x16x3_t temp;
+  temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (UINT64_C (0)));
+  temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (UINT64_C (0)));
+  temp.val[2] = vcombine_u8 (val.val[2], vcreate_u8 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2);
+  __builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u16 (uint16_t * __a, uint16x4x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  uint16x8x3_t temp;
+  temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (UINT64_C (0)));
+  temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (UINT64_C (0)));
+  temp.val[2] = vcombine_u16 (val.val[2], vcreate_u16 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2);
+  __builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u32 (uint32_t * __a, uint32x2x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  uint32x4x3_t temp;
+  temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (UINT64_C (0)));
+  temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (UINT64_C (0)));
+  temp.val[2] = vcombine_u32 (val.val[2], vcreate_u32 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[2], 2);
+  __builtin_aarch64_st3v2si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_f32 (float32_t * __a, float32x2x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  float32x4x3_t temp;
+  temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (UINT64_C (0)));
+  temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (UINT64_C (0)));
+  temp.val[2] = vcombine_f32 (val.val[2], vcreate_f32 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[2], 2);
+  __builtin_aarch64_st3v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s8 (int8_t * __a, int8x16x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[2], 2);
+  __builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_p8 (poly8_t * __a, poly8x16x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[2], 2);
+  __builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s16 (int16_t * __a, int16x8x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[2], 2);
+  __builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_p16 (poly16_t * __a, poly16x8x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[2], 2);
+  __builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s32 (int32_t * __a, int32x4x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[2], 2);
+  __builtin_aarch64_st3v4si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s64 (int64_t * __a, int64x2x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[2], 2);
+  __builtin_aarch64_st3v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u8 (uint8_t * __a, uint8x16x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[2], 2);
+  __builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u16 (uint16_t * __a, uint16x8x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[2], 2);
+  __builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u32 (uint32_t * __a, uint32x4x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[2], 2);
+  __builtin_aarch64_st3v4si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u64 (uint64_t * __a, uint64x2x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[2], 2);
+  __builtin_aarch64_st3v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_f32 (float32_t * __a, float32x4x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) val.val[2], 2);
+  __builtin_aarch64_st3v4sf ((__builtin_aarch64_simd_sf *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_f64 (float64_t * __a, float64x2x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) val.val[2], 2);
+  __builtin_aarch64_st3v2df ((__builtin_aarch64_simd_df *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s64 (int64_t * __a, int64x1x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  int64x2x4_t temp;
+  temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (INT64_C (0)));
+  temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (INT64_C (0)));
+  temp.val[2] = vcombine_s64 (val.val[2], vcreate_s64 (INT64_C (0)));
+  temp.val[3] = vcombine_s64 (val.val[3], vcreate_s64 (INT64_C (0)));
+  __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[3], 3);
+  __builtin_aarch64_st4di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u64 (uint64_t * __a, uint64x1x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  uint64x2x4_t temp;
+  temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (UINT64_C (0)));
+  temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (UINT64_C (0)));
+  temp.val[2] = vcombine_u64 (val.val[2], vcreate_u64 (UINT64_C (0)));
+  temp.val[3] = vcombine_u64 (val.val[3], vcreate_u64 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[3], 3);
+  __builtin_aarch64_st4di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_f64 (float64_t * __a, float64x1x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  float64x2x4_t temp;
+  temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (UINT64_C (0)));
+  temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (UINT64_C (0)));
+  temp.val[2] = vcombine_f64 (val.val[2], vcreate_f64 (UINT64_C (0)));
+  temp.val[3] = vcombine_f64 (val.val[3], vcreate_f64 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[3], 3);
+  __builtin_aarch64_st4df ((__builtin_aarch64_simd_df *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s8 (int8_t * __a, int8x8x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  int8x16x4_t temp;
+  temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (INT64_C (0)));
+  temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (INT64_C (0)));
+  temp.val[2] = vcombine_s8 (val.val[2], vcreate_s8 (INT64_C (0)));
+  temp.val[3] = vcombine_s8 (val.val[3], vcreate_s8 (INT64_C (0)));
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[3], 3);
+  __builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_p8 (poly8_t * __a, poly8x8x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  poly8x16x4_t temp;
+  temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (UINT64_C (0)));
+  temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (UINT64_C (0)));
+  temp.val[2] = vcombine_p8 (val.val[2], vcreate_p8 (UINT64_C (0)));
+  temp.val[3] = vcombine_p8 (val.val[3], vcreate_p8 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[3], 3);
+  __builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s16 (int16_t * __a, int16x4x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  int16x8x4_t temp;
+  temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (INT64_C (0)));
+  temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (INT64_C (0)));
+  temp.val[2] = vcombine_s16 (val.val[2], vcreate_s16 (INT64_C (0)));
+  temp.val[3] = vcombine_s16 (val.val[3], vcreate_s16 (INT64_C (0)));
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[3], 3);
+  __builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_p16 (poly16_t * __a, poly16x4x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  poly16x8x4_t temp;
+  temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (UINT64_C (0)));
+  temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (UINT64_C (0)));
+  temp.val[2] = vcombine_p16 (val.val[2], vcreate_p16 (UINT64_C (0)));
+  temp.val[3] = vcombine_p16 (val.val[3], vcreate_p16 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[3], 3);
+  __builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s32 (int32_t * __a, int32x2x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  int32x4x4_t temp;
+  temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (INT64_C (0)));
+  temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (INT64_C (0)));
+  temp.val[2] = vcombine_s32 (val.val[2], vcreate_s32 (INT64_C (0)));
+  temp.val[3] = vcombine_s32 (val.val[3], vcreate_s32 (INT64_C (0)));
+  __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[3], 3);
+  __builtin_aarch64_st4v2si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u8 (uint8_t * __a, uint8x8x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  uint8x16x4_t temp;
+  temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (UINT64_C (0)));
+  temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (UINT64_C (0)));
+  temp.val[2] = vcombine_u8 (val.val[2], vcreate_u8 (UINT64_C (0)));
+  temp.val[3] = vcombine_u8 (val.val[3], vcreate_u8 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[3], 3);
+  __builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u16 (uint16_t * __a, uint16x4x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  uint16x8x4_t temp;
+  temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (UINT64_C (0)));
+  temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (UINT64_C (0)));
+  temp.val[2] = vcombine_u16 (val.val[2], vcreate_u16 (UINT64_C (0)));
+  temp.val[3] = vcombine_u16 (val.val[3], vcreate_u16 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[3], 3);
+  __builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u32 (uint32_t * __a, uint32x2x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  uint32x4x4_t temp;
+  temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (UINT64_C (0)));
+  temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (UINT64_C (0)));
+  temp.val[2] = vcombine_u32 (val.val[2], vcreate_u32 (UINT64_C (0)));
+  temp.val[3] = vcombine_u32 (val.val[3], vcreate_u32 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[3], 3);
+  __builtin_aarch64_st4v2si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_f32 (float32_t * __a, float32x2x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  float32x4x4_t temp;
+  temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (UINT64_C (0)));
+  temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (UINT64_C (0)));
+  temp.val[2] = vcombine_f32 (val.val[2], vcreate_f32 (UINT64_C (0)));
+  temp.val[3] = vcombine_f32 (val.val[3], vcreate_f32 (UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[3], 3);
+  __builtin_aarch64_st4v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s8 (int8_t * __a, int8x16x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[3], 3);
+  __builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_p8 (poly8_t * __a, poly8x16x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[3], 3);
+  __builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s16 (int16_t * __a, int16x8x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[3], 3);
+  __builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_p16 (poly16_t * __a, poly16x8x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[3], 3);
+  __builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s32 (int32_t * __a, int32x4x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[3], 3);
+  __builtin_aarch64_st4v4si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s64 (int64_t * __a, int64x2x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[3], 3);
+  __builtin_aarch64_st4v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u8 (uint8_t * __a, uint8x16x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[3], 3);
+  __builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u16 (uint16_t * __a, uint16x8x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[3], 3);
+  __builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u32 (uint32_t * __a, uint32x4x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[3], 3);
+  __builtin_aarch64_st4v4si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u64 (uint64_t * __a, uint64x2x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[3], 3);
+  __builtin_aarch64_st4v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_f32 (float32_t * __a, float32x4x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[3], 3);
+  __builtin_aarch64_st4v4sf ((__builtin_aarch64_simd_sf *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_f64 (float64_t * __a, float64x2x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[3], 3);
+  __builtin_aarch64_st4v2df ((__builtin_aarch64_simd_df *) __a, __o);
+}
+
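A usage sketch for the interleaving vst2/vst3/vst4 stores above (hypothetical helper, illustrative only): the .val elements are written to memory interleaved lane by lane.

  /* Writes re0, im0, re1, im1, ... (8 floats) to __out.  */
  void
  store_complex_f32 (float32_t *__out, float32x4_t __re, float32x4_t __im)
  {
    float32x4x2_t __v;
    __v.val[0] = __re;
    __v.val[1] = __im;
    vst2q_f32 (__out, __v);
  }
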
+/* vsub */
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsubd_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return __a - __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsubd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return __a - __b;
+}
+
+/* vtrn */
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vtrn_f32 (float32x2_t a, float32x2_t b)
+{
+  return (float32x2x2_t) {vtrn1_f32 (a, b), vtrn2_f32 (a, b)};
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vtrn_p8 (poly8x8_t a, poly8x8_t b)
+{
+  return (poly8x8x2_t) {vtrn1_p8 (a, b), vtrn2_p8 (a, b)};
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vtrn_p16 (poly16x4_t a, poly16x4_t b)
+{
+  return (poly16x4x2_t) {vtrn1_p16 (a, b), vtrn2_p16 (a, b)};
+}
+
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vtrn_s8 (int8x8_t a, int8x8_t b)
+{
+  return (int8x8x2_t) {vtrn1_s8 (a, b), vtrn2_s8 (a, b)};
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vtrn_s16 (int16x4_t a, int16x4_t b)
+{
+  return (int16x4x2_t) {vtrn1_s16 (a, b), vtrn2_s16 (a, b)};
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vtrn_s32 (int32x2_t a, int32x2_t b)
+{
+  return (int32x2x2_t) {vtrn1_s32 (a, b), vtrn2_s32 (a, b)};
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vtrn_u8 (uint8x8_t a, uint8x8_t b)
+{
+  return (uint8x8x2_t) {vtrn1_u8 (a, b), vtrn2_u8 (a, b)};
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vtrn_u16 (uint16x4_t a, uint16x4_t b)
+{
+  return (uint16x4x2_t) {vtrn1_u16 (a, b), vtrn2_u16 (a, b)};
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vtrn_u32 (uint32x2_t a, uint32x2_t b)
+{
+  return (uint32x2x2_t) {vtrn1_u32 (a, b), vtrn2_u32 (a, b)};
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vtrnq_f32 (float32x4_t a, float32x4_t b)
+{
+  return (float32x4x2_t) {vtrn1q_f32 (a, b), vtrn2q_f32 (a, b)};
+}
+
+__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+vtrnq_p8 (poly8x16_t a, poly8x16_t b)
+{
+  return (poly8x16x2_t) {vtrn1q_p8 (a, b), vtrn2q_p8 (a, b)};
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vtrnq_p16 (poly16x8_t a, poly16x8_t b)
+{
+  return (poly16x8x2_t) {vtrn1q_p16 (a, b), vtrn2q_p16 (a, b)};
+}
+
+__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+vtrnq_s8 (int8x16_t a, int8x16_t b)
+{
+  return (int8x16x2_t) {vtrn1q_s8 (a, b), vtrn2q_s8 (a, b)};
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vtrnq_s16 (int16x8_t a, int16x8_t b)
+{
+  return (int16x8x2_t) {vtrn1q_s16 (a, b), vtrn2q_s16 (a, b)};
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vtrnq_s32 (int32x4_t a, int32x4_t b)
+{
+  return (int32x4x2_t) {vtrn1q_s32 (a, b), vtrn2q_s32 (a, b)};
+}
+
+__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+vtrnq_u8 (uint8x16_t a, uint8x16_t b)
+{
+  return (uint8x16x2_t) {vtrn1q_u8 (a, b), vtrn2q_u8 (a, b)};
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vtrnq_u16 (uint16x8_t a, uint16x8_t b)
+{
+  return (uint16x8x2_t) {vtrn1q_u16 (a, b), vtrn2q_u16 (a, b)};
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vtrnq_u32 (uint32x4_t a, uint32x4_t b)
+{
+  return (uint32x4x2_t) {vtrn1q_u32 (a, b), vtrn2q_u32 (a, b)};
+}
+
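A usage sketch for the vtrn intrinsics above (hypothetical helper): the pair of results performs a 2x2 transpose of adjacent lanes from the two inputs.

  /* res.val[0] = {r0[0], r1[0], r0[2], r1[2], ...},
     res.val[1] = {r0[1], r1[1], r0[3], r1[3], ...}.  */
  int16x8x2_t
  transpose_pairs_s16 (int16x8_t __r0, int16x8_t __r1)
  {
    return vtrnq_s16 (__r0, __r1);
  }
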
+/* vtst */
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtst_s8 (int8x8_t __a, int8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_cmtstv8qi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtst_s16 (int16x4_t __a, int16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_cmtstv4hi (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vtst_s32 (int32x2_t __a, int32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_cmtstv2si (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vtst_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmtstdi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtst_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  return (uint8x8_t) __builtin_aarch64_cmtstv8qi ((int8x8_t) __a,
+                                                (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtst_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  return (uint16x4_t) __builtin_aarch64_cmtstv4hi ((int16x4_t) __a,
+                                                 (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vtst_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  return (uint32x2_t) __builtin_aarch64_cmtstv2si ((int32x2_t) __a,
+                                                 (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vtst_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmtstdi ((int64x1_t) __a,
+                                               (int64x1_t) __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtstq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_cmtstv16qi (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtstq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_cmtstv8hi (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vtstq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_cmtstv4si (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vtstq_s64 (int64x2_t __a, int64x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_cmtstv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtstq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return (uint8x16_t) __builtin_aarch64_cmtstv16qi ((int8x16_t) __a,
+                                                  (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtstq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return (uint16x8_t) __builtin_aarch64_cmtstv8hi ((int16x8_t) __a,
+                                                 (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vtstq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return (uint32x4_t) __builtin_aarch64_cmtstv4si ((int32x4_t) __a,
+                                                 (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vtstq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+  return (uint64x2_t) __builtin_aarch64_cmtstv2di ((int64x2_t) __a,
+                                                 (int64x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vtstd_s64 (int64x1_t __a, int64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmtstdi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vtstd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+  return (uint64x1_t) __builtin_aarch64_cmtstdi ((int64x1_t) __a,
+                                               (int64x1_t) __b);
+}
+
+/* vuqadd */
+
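+/* The vuqadd intrinsics map to the SUQADD instruction: each lane of the
+   signed operand is incremented by the corresponding lane of the unsigned
+   operand, with the result saturated to the signed range of the element
+   type.  */
+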
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vuqadd_s8 (int8x8_t __a, uint8x8_t __b)
+{
+  return (int8x8_t) __builtin_aarch64_suqaddv8qi (__a, (int8x8_t) __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vuqadd_s16 (int16x4_t __a, uint16x4_t __b)
+{
+  return (int16x4_t) __builtin_aarch64_suqaddv4hi (__a, (int16x4_t) __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vuqadd_s32 (int32x2_t __a, uint32x2_t __b)
+{
+  return (int32x2_t) __builtin_aarch64_suqaddv2si (__a, (int32x2_t) __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vuqadd_s64 (int64x1_t __a, uint64x1_t __b)
+{
+  return (int64x1_t) __builtin_aarch64_suqadddi (__a, (int64x1_t) __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vuqaddq_s8 (int8x16_t __a, uint8x16_t __b)
+{
+  return (int8x16_t) __builtin_aarch64_suqaddv16qi (__a, (int8x16_t) __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vuqaddq_s16 (int16x8_t __a, uint16x8_t __b)
+{
+  return (int16x8_t) __builtin_aarch64_suqaddv8hi (__a, (int16x8_t) __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vuqaddq_s32 (int32x4_t __a, uint32x4_t __b)
+{
+  return (int32x4_t) __builtin_aarch64_suqaddv4si (__a, (int32x4_t) __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vuqaddq_s64 (int64x2_t __a, uint64x2_t __b)
+{
+  return (int64x2_t) __builtin_aarch64_suqaddv2di (__a, (int64x2_t) __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vuqaddb_s8 (int8x1_t __a, uint8x1_t __b)
+{
+  return (int8x1_t) __builtin_aarch64_suqaddqi (__a, (int8x1_t) __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vuqaddh_s16 (int16x1_t __a, uint16x1_t __b)
+{
+  return (int16x1_t) __builtin_aarch64_suqaddhi (__a, (int16x1_t) __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vuqadds_s32 (int32x1_t __a, uint32x1_t __b)
+{
+  return (int32x1_t) __builtin_aarch64_suqaddsi (__a, (int32x1_t) __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vuqaddd_s64 (int64x1_t __a, uint64x1_t __b)
+{
+  return (int64x1_t) __builtin_aarch64_suqadddi (__a, (int64x1_t) __b);
+}
+
+#define __DEFINTERLEAVE(op, rettype, intype, funcsuffix, Q)            \
+  __extension__ static __inline rettype                                        \
+  __attribute__ ((__always_inline__))                                  \
+  v ## op ## Q ## _ ## funcsuffix (intype a, intype b)                 \
+  {                                                                    \
+    return (rettype) {v ## op ## 1 ## Q ## _ ## funcsuffix (a, b),     \
+                     v ## op ## 2 ## Q ## _ ## funcsuffix (a, b)};     \
+  }
+
+#define __INTERLEAVE_LIST(op)                                  \
+  __DEFINTERLEAVE (op, float32x2x2_t, float32x2_t, f32,)       \
+  __DEFINTERLEAVE (op, poly8x8x2_t, poly8x8_t, p8,)            \
+  __DEFINTERLEAVE (op, poly16x4x2_t, poly16x4_t, p16,)         \
+  __DEFINTERLEAVE (op, int8x8x2_t, int8x8_t, s8,)              \
+  __DEFINTERLEAVE (op, int16x4x2_t, int16x4_t, s16,)           \
+  __DEFINTERLEAVE (op, int32x2x2_t, int32x2_t, s32,)           \
+  __DEFINTERLEAVE (op, uint8x8x2_t, uint8x8_t, u8,)            \
+  __DEFINTERLEAVE (op, uint16x4x2_t, uint16x4_t, u16,)         \
+  __DEFINTERLEAVE (op, uint32x2x2_t, uint32x2_t, u32,)         \
+  __DEFINTERLEAVE (op, float32x4x2_t, float32x4_t, f32, q)     \
+  __DEFINTERLEAVE (op, poly8x16x2_t, poly8x16_t, p8, q)                \
+  __DEFINTERLEAVE (op, poly16x8x2_t, poly16x8_t, p16, q)       \
+  __DEFINTERLEAVE (op, int8x16x2_t, int8x16_t, s8, q)          \
+  __DEFINTERLEAVE (op, int16x8x2_t, int16x8_t, s16, q)         \
+  __DEFINTERLEAVE (op, int32x4x2_t, int32x4_t, s32, q)         \
+  __DEFINTERLEAVE (op, uint8x16x2_t, uint8x16_t, u8, q)                \
+  __DEFINTERLEAVE (op, uint16x8x2_t, uint16x8_t, u16, q)       \
+  __DEFINTERLEAVE (op, uint32x4x2_t, uint32x4_t, u32, q)
+
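+/* __INTERLEAVE_LIST (op) defines the v<op>[q]_<type> intrinsics in terms of
+   the v<op>1[q]_<type> and v<op>2[q]_<type> intrinsics above.  For example,
+   __INTERLEAVE_LIST (zip) defines vzip_s8 (a, b) as
+   (int8x8x2_t) {vzip1_s8 (a, b), vzip2_s8 (a, b)}.  */
+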
+/* vuzp */
+
+__INTERLEAVE_LIST (uzp)
+
+/* vzip */
+
+__INTERLEAVE_LIST (zip)
+
+#undef __INTERLEAVE_LIST
+#undef __DEFINTERLEAVE
+
+/* End of optimal implementations in approved order.  */
+
+#endif
diff --git a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md
new file mode 100644 (file)
index 0000000..fe61307
--- /dev/null
@@ -0,0 +1,167 @@
+;; Machine description for AArch64 architecture.
+;; Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_register_constraint "k" "STACK_REG"
+  "@internal The stack register.")
+
+(define_register_constraint "w" "FP_REGS"
+  "Floating point and SIMD vector registers.")
+
+(define_register_constraint "x" "FP_LO_REGS"
+  "Floating point and SIMD vector registers V0 - V15.")
+
+(define_constraint "I"
+ "A constant that can be used with an ADD operation."
+ (and (match_code "const_int")
+      (match_test "aarch64_uimm12_shift (ival)")))
+
+(define_constraint "J"
+ "A constant that can be used with a SUB operation (once negated)."
+ (and (match_code "const_int")
+      (match_test "aarch64_uimm12_shift (-ival)")))
+
+;; We can't use the mode of a CONST_INT to determine the context in
+;; which it is being used, so we must have a separate constraint for
+;; each context.
+
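+;; For instance, #0xff is encodable as a bitmask immediate for both 32-bit
+;; and 64-bit logical operations, so it satisfies both "K" and "L" below,
+;; whereas #0xffffffff is only encodable as a 64-bit bitmask immediate and
+;; therefore satisfies "L" but not "K".
+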
+(define_constraint "K"
+ "A constant that can be used with a 32-bit logical operation."
+ (and (match_code "const_int")
+      (match_test "aarch64_bitmask_imm (ival, SImode)")))
+
+(define_constraint "L"
+ "A constant that can be used with a 64-bit logical operation."
+ (and (match_code "const_int")
+      (match_test "aarch64_bitmask_imm (ival, DImode)")))
+
+(define_constraint "M"
+ "A constant that can be used with a 32-bit MOV immediate operation."
+ (and (match_code "const_int")
+      (match_test "aarch64_move_imm (ival, SImode)")))
+
+(define_constraint "N"
+ "A constant that can be used with a 64-bit MOV immediate operation."
+ (and (match_code "const_int")
+      (match_test "aarch64_move_imm (ival, DImode)")))
+
+(define_constraint "S"
+  "A constraint that matches an absolute symbolic address."
+  (and (match_code "const,symbol_ref,label_ref")
+       (match_test "aarch64_symbolic_address_p (op)")))
+
+(define_constraint "Y"
+  "Floating point constant zero."
+  (and (match_code "const_double")
+       (match_test "aarch64_const_double_zero_rtx_p (op)")))
+
+(define_constraint "Z"
+  "Integer constant zero."
+  (match_test "op == const0_rtx"))
+
+(define_constraint "Usa"
+  "A constraint that matches an absolute symbolic address."
+  (and (match_code "const,symbol_ref")
+       (match_test "aarch64_symbolic_address_p (op)")))
+
+(define_constraint "Ush"
+  "A constraint that matches an absolute symbolic address high part."
+  (and (match_code "high")
+       (match_test "aarch64_valid_symref (XEXP (op, 0), GET_MODE (XEXP (op, 0)))")))
+
+(define_constraint "Uss"
+  "@internal
+  A constraint that matches an immediate shift constant in SImode."
+  (and (match_code "const_int")
+       (match_test "(unsigned HOST_WIDE_INT) ival < 32")))
+
+(define_constraint "Usd"
+  "@internal
+  A constraint that matches an immediate shift constant in DImode."
+  (and (match_code "const_int")
+       (match_test "(unsigned HOST_WIDE_INT) ival < 64")))
+
+(define_constraint "UsM"
+  "@internal
+  A constraint that matches the immediate constant -1."
+  (match_test "op == constm1_rtx"))
+
+(define_constraint "Ui3"
+  "@internal
+  A constraint that matches the integers 0...4."
+  (and (match_code "const_int")
+       (match_test "(unsigned HOST_WIDE_INT) ival <= 4")))
+
+(define_constraint "Up3"
+  "@internal
+  A constraint that matches the integers 2^(0...4)."
+  (and (match_code "const_int")
+       (match_test "(unsigned) exact_log2 (ival) <= 4")))
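+;; (That is, one of the immediates 1, 2, 4, 8 or 16.)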
+
+(define_memory_constraint "Q"
+ "A memory address which uses a single base register with no offset."
+ (and (match_code "mem")
+      (match_test "REG_P (XEXP (op, 0))")))
+
+(define_memory_constraint "Ump"
+  "@internal
+  A memory address suitable for a load/store pair operation."
+  (and (match_code "mem")
+       (match_test "aarch64_legitimate_address_p (GET_MODE (op), XEXP (op, 0),
+                                                 PARALLEL, 1)")))
+
+(define_memory_constraint "Utv"
+  "@internal
+   An address valid for loading/storing opaque structure
+   types wider than TImode."
+  (and (match_code "mem")
+       (match_test "aarch64_simd_mem_operand_p (op)")))
+
+(define_constraint "Dn"
+  "@internal
+ A constraint that matches a vector of immediates."
+ (and (match_code "const_vector")
+      (match_test "aarch64_simd_immediate_valid_for_move (op, GET_MODE (op),
+                                                         NULL, NULL, NULL,
+                                                         NULL, NULL) != 0")))
+
+(define_constraint "Dl"
+  "@internal
+ A constraint that matches a vector of immediates for left shifts."
+ (and (match_code "const_vector")
+      (match_test "aarch64_simd_shift_imm_p (op, GET_MODE (op),
+                                                true)")))
+
+(define_constraint "Dr"
+  "@internal
+ A constraint that matches a vector of immediates for right shifts."
+ (and (match_code "const_vector")
+      (match_test "aarch64_simd_shift_imm_p (op, GET_MODE (op),
+                                                false)")))
+
+(define_constraint "Dz"
+  "@internal
+ A constraint that matches a vector of immediate zeros."
+ (and (match_code "const_vector")
+      (match_test "aarch64_simd_imm_zero_p (op, GET_MODE (op))")))
+
+(define_constraint "Dd"
+  "@internal
+ A constraint that matches an immediate operand valid for an AdvSIMD scalar operation."
+ (and (match_code "const_int")
+      (match_test "aarch64_simd_imm_scalar_p (op, GET_MODE (op))")))
diff --git a/gcc/config/aarch64/gentune.sh b/gcc/config/aarch64/gentune.sh
new file mode 100644 (file)
index 0000000..97b3787
--- /dev/null
@@ -0,0 +1,32 @@
+#!/bin/sh
+#
+# Copyright (C) 2011, 2012 Free Software Foundation, Inc.
+# Contributed by ARM Ltd.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3.  If not see
+# <http://www.gnu.org/licenses/>.
+
+# Generate aarch64-tune.md, a file containing the tune attribute from the list of 
+# CPUs in aarch64-cores.def
+
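+# For illustration only: assuming aarch64-cores.def lists cores whose
+# scheduler names are "large" and "small", the generated aarch64-tune.md
+# would look roughly like:
+#
+#   ;; -*- buffer-read-only: t -*-
+#   ;; Generated automatically by gentune.sh from aarch64-cores.def
+#   (define_attr "tune"
+#    "large,small"
+#    (const (symbol_ref "((enum attr_tune) aarch64_tune)")))
+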
+echo ";; -*- buffer-read-only: t -*-"
+echo ";; Generated automatically by gentune.sh from aarch64-cores.def"
+
+allcores=`awk -F'[(,   ]+' '/^AARCH64_CORE/ { cores = cores$3"," } END { print cores } ' $1`
+
+echo "(define_attr \"tune\""
+echo " \"$allcores\"" | sed -e 's/,"$/"/'
+echo " (const (symbol_ref \"((enum attr_tune) aarch64_tune)\")))"
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
new file mode 100644 (file)
index 0000000..bf2041e
--- /dev/null
@@ -0,0 +1,716 @@
+;; Machine description for AArch64 architecture.
+;; Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; -------------------------------------------------------------------
+;; Mode Iterators
+;; -------------------------------------------------------------------
+
+
+;; Iterator for General Purpose Integer registers (32- and 64-bit modes)
+(define_mode_iterator GPI [SI DI])
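+;; (A pattern written once with a GPI operand, e.g. (plus:GPI ...), expands
+;; into both an SImode and a DImode variant.)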
+
+;; Iterator for QI and HI modes
+(define_mode_iterator SHORT [QI HI])
+
+;; Iterator for all integer modes (up to 64-bit)
+(define_mode_iterator ALLI [QI HI SI DI])
+
+;; Iterator for all scalar modes (up to 64-bit)
+(define_mode_iterator SDQ_I [QI HI SI DI])
+
+;; Iterator for all integer modes that can be extended (up to 64-bit)
+(define_mode_iterator ALLX [QI HI SI])
+
+;; Iterator for General Purpose Floating-point registers (32- and 64-bit modes)
+(define_mode_iterator GPF [SF DF])
+
+;; Integer vector modes.
+(define_mode_iterator VDQ [V8QI V16QI V4HI V8HI V2SI V4SI V2DI])
+
+;; Integer vector modes.
+(define_mode_iterator VDQ_I [V8QI V16QI V4HI V8HI V2SI V4SI V2DI])
+
+;; vector and scalar, 64 & 128-bit container, all integer modes
+(define_mode_iterator VSDQ_I [V8QI V16QI V4HI V8HI V2SI V4SI V2DI QI HI SI DI])
+
+;; vector and scalar, 64 & 128-bit container: all vector integer modes;
+;; 64-bit scalar integer mode
+(define_mode_iterator VSDQ_I_DI [V8QI V16QI V4HI V8HI V2SI V4SI V2DI DI])
+
+;; Double vector modes.
+(define_mode_iterator VD [V8QI V4HI V2SI V2SF])
+
+;; vector, 64-bit container, all integer modes
+(define_mode_iterator VD_BHSI [V8QI V4HI V2SI])
+
+;; 128 and 64-bit container; 8, 16, 32-bit vector integer modes
+(define_mode_iterator VDQ_BHSI [V8QI V16QI V4HI V8HI V2SI V4SI])
+
+;; Quad vector modes.
+(define_mode_iterator VQ [V16QI V8HI V4SI V2DI V4SF V2DF])
+
+;; All vector modes, except double.
+(define_mode_iterator VQ_S [V8QI V16QI V4HI V8HI V2SI V4SI])
+
+;; Vector and scalar, 64 & 128-bit container: all vector integer modes;
+;; 8, 16, 32-bit scalar integer modes
+(define_mode_iterator VSDQ_I_BHSI [V8QI V16QI V4HI V8HI V2SI V4SI V2DI QI HI SI])
+
+;; Vector modes for moves.
+(define_mode_iterator VDQM [V8QI V16QI V4HI V8HI V2SI V4SI])
+
+;; This mode iterator allows :PTR to be used for patterns that operate on
+;; pointer-sized quantities.  Exactly one of the two alternatives will match.
+(define_mode_iterator PTR [(SI "Pmode == SImode") (DI "Pmode == DImode")])
+
+;; Vector Float modes.
+(define_mode_iterator VDQF [V2SF V4SF V2DF])
+
+;; Vector Float modes with 2 elements.
+(define_mode_iterator V2F [V2SF V2DF])
+
+;; All modes.
+(define_mode_iterator VALL [V8QI V16QI V4HI V8HI V2SI V4SI V2DI V2SF V4SF V2DF])
+
+;; Vector modes for Integer reduction across lanes.
+(define_mode_iterator VDQV [V8QI V16QI V4HI V8HI V4SI])
+
+;; All double integer narrow-able modes.
+(define_mode_iterator VDN [V4HI V2SI DI])
+
+;; All quad integer narrow-able modes.
+(define_mode_iterator VQN [V8HI V4SI V2DI])
+
+;; All double integer widen-able modes.
+(define_mode_iterator VDW [V8QI V4HI V2SI])
+
+;; Vector and scalar 128-bit container: narrowable 16, 32, 64-bit integer modes
+(define_mode_iterator VSQN_HSDI [V8HI V4SI V2DI HI SI DI])
+
+;; All quad integer widen-able modes.
+(define_mode_iterator VQW [V16QI V8HI V4SI])
+
+;; Double vector modes for combines.
+(define_mode_iterator VDC [V8QI V4HI V2SI V2SF DI DF])
+
+;; Double vector modes for combines.
+(define_mode_iterator VDIC [V8QI V4HI V2SI])
+
+;; Double vector modes.
+(define_mode_iterator VD_RE [V8QI V4HI V2SI DI DF V2SF])
+
+;; Vector modes except double int.
+(define_mode_iterator VDQIF [V8QI V16QI V4HI V8HI V2SI V4SI V2SF V4SF V2DF])
+
+;; Vector modes for H and S types.
+(define_mode_iterator VDQHS [V4HI V8HI V2SI V4SI])
+
+;; Vector and scalar integer modes for H and S
+(define_mode_iterator VSDQ_HSI [V4HI V8HI V2SI V4SI HI SI])
+
+;; Vector and scalar 64-bit container: 16, 32-bit integer modes
+(define_mode_iterator VSD_HSI [V4HI V2SI HI SI])
+
+;; Vector 64-bit container: 16, 32-bit integer modes
+(define_mode_iterator VD_HSI [V4HI V2SI])
+
+;; Scalar 64-bit container: 16, 32-bit integer modes
+(define_mode_iterator SD_HSI [HI SI])
+
+;; Vector 64-bit container: 16, 32-bit integer modes
+(define_mode_iterator VQ_HSI [V8HI V4SI])
+
+;; All byte modes.
+(define_mode_iterator VB [V8QI V16QI])
+
+(define_mode_iterator TX [TI TF])
+
+;; Opaque structure modes.
+(define_mode_iterator VSTRUCT [OI CI XI])
+
+;; Double scalar modes
+(define_mode_iterator DX [DI DF])
+
+;; ------------------------------------------------------------------
+;; Unspec enumerations for Advanced SIMD.  These could well go into
+;; aarch64.md but for their use in the int iterators defined here.
+;; ------------------------------------------------------------------
+
+(define_c_enum "unspec"
+ [
+    UNSPEC_ASHIFT_SIGNED       ; Used in aarch64-simd.md.
+    UNSPEC_ASHIFT_UNSIGNED     ; Used in aarch64-simd.md.
+    UNSPEC_FMAXV       ; Used in aarch64-simd.md.
+    UNSPEC_FMINV       ; Used in aarch64-simd.md.
+    UNSPEC_FADDV       ; Used in aarch64-simd.md.
+    UNSPEC_ADDV                ; Used in aarch64-simd.md.
+    UNSPEC_SMAXV       ; Used in aarch64-simd.md.
+    UNSPEC_SMINV       ; Used in aarch64-simd.md.
+    UNSPEC_UMAXV       ; Used in aarch64-simd.md.
+    UNSPEC_UMINV       ; Used in aarch64-simd.md.
+    UNSPEC_SHADD       ; Used in aarch64-simd.md.
+    UNSPEC_UHADD       ; Used in aarch64-simd.md.
+    UNSPEC_SRHADD      ; Used in aarch64-simd.md.
+    UNSPEC_URHADD      ; Used in aarch64-simd.md.
+    UNSPEC_SHSUB       ; Used in aarch64-simd.md.
+    UNSPEC_UHSUB       ; Used in aarch64-simd.md.
+    UNSPEC_SRHSUB      ; Used in aarch64-simd.md.
+    UNSPEC_URHSUB      ; Used in aarch64-simd.md.
+    UNSPEC_ADDHN       ; Used in aarch64-simd.md.
+    UNSPEC_RADDHN      ; Used in aarch64-simd.md.
+    UNSPEC_SUBHN       ; Used in aarch64-simd.md.
+    UNSPEC_RSUBHN      ; Used in aarch64-simd.md.
+    UNSPEC_ADDHN2      ; Used in aarch64-simd.md.
+    UNSPEC_RADDHN2     ; Used in aarch64-simd.md.
+    UNSPEC_SUBHN2      ; Used in aarch64-simd.md.
+    UNSPEC_RSUBHN2     ; Used in aarch64-simd.md.
+    UNSPEC_SQDMULH     ; Used in aarch64-simd.md.
+    UNSPEC_SQRDMULH    ; Used in aarch64-simd.md.
+    UNSPEC_PMUL                ; Used in aarch64-simd.md.
+    UNSPEC_USQADD      ; Used in aarch64-simd.md.
+    UNSPEC_SUQADD      ; Used in aarch64-simd.md.
+    UNSPEC_SQXTUN      ; Used in aarch64-simd.md.
+    UNSPEC_SQXTN       ; Used in aarch64-simd.md.
+    UNSPEC_UQXTN       ; Used in aarch64-simd.md.
+    UNSPEC_SSRA                ; Used in aarch64-simd.md.
+    UNSPEC_USRA                ; Used in aarch64-simd.md.
+    UNSPEC_SRSRA       ; Used in aarch64-simd.md.
+    UNSPEC_URSRA       ; Used in aarch64-simd.md.
+    UNSPEC_SRSHR       ; Used in aarch64-simd.md.
+    UNSPEC_URSHR       ; Used in aarch64-simd.md.
+    UNSPEC_SQSHLU      ; Used in aarch64-simd.md.
+    UNSPEC_SQSHL       ; Used in aarch64-simd.md.
+    UNSPEC_UQSHL       ; Used in aarch64-simd.md.
+    UNSPEC_SQSHRUN     ; Used in aarch64-simd.md.
+    UNSPEC_SQRSHRUN    ; Used in aarch64-simd.md.
+    UNSPEC_SQSHRN      ; Used in aarch64-simd.md.
+    UNSPEC_UQSHRN      ; Used in aarch64-simd.md.
+    UNSPEC_SQRSHRN     ; Used in aarch64-simd.md.
+    UNSPEC_UQRSHRN     ; Used in aarch64-simd.md.
+    UNSPEC_SSHL                ; Used in aarch64-simd.md.
+    UNSPEC_USHL                ; Used in aarch64-simd.md.
+    UNSPEC_SRSHL       ; Used in aarch64-simd.md.
+    UNSPEC_URSHL       ; Used in aarch64-simd.md.
+    UNSPEC_SQRSHL      ; Used in aarch64-simd.md.
+    UNSPEC_UQRSHL      ; Used in aarch64-simd.md.
+    UNSPEC_CMEQ                ; Used in aarch64-simd.md.
+    UNSPEC_CMLE                ; Used in aarch64-simd.md.
+    UNSPEC_CMLT                ; Used in aarch64-simd.md.
+    UNSPEC_CMGE                ; Used in aarch64-simd.md.
+    UNSPEC_CMGT                ; Used in aarch64-simd.md.
+    UNSPEC_CMHS                ; Used in aarch64-simd.md.
+    UNSPEC_CMHI                ; Used in aarch64-simd.md.
+    UNSPEC_SSLI                ; Used in aarch64-simd.md.
+    UNSPEC_USLI                ; Used in aarch64-simd.md.
+    UNSPEC_SSRI                ; Used in aarch64-simd.md.
+    UNSPEC_USRI                ; Used in aarch64-simd.md.
+    UNSPEC_SSHLL       ; Used in aarch64-simd.md.
+    UNSPEC_USHLL       ; Used in aarch64-simd.md.
+    UNSPEC_ADDP                ; Used in aarch64-simd.md.
+    UNSPEC_CMTST       ; Used in aarch64-simd.md.
+    UNSPEC_FMAX                ; Used in aarch64-simd.md.
+    UNSPEC_FMIN                ; Used in aarch64-simd.md.
+])
+
+;; -------------------------------------------------------------------
+;; Mode attributes
+;; -------------------------------------------------------------------
+
+;; In GPI templates, a string like "%<w>0" will expand to "%w0" in the
+;; 32-bit version and "%x0" in the 64-bit version.
+(define_mode_attr w [(QI "w") (HI "w") (SI "w") (DI "x") (SF "s") (DF "d")])
+
+;; For scalar usage of vector/FP registers
+(define_mode_attr v [(QI "b") (HI "h") (SI "s") (DI "d")
+                   (V8QI "") (V16QI "")
+                   (V4HI "") (V8HI "")
+                   (V2SI "") (V4SI  "")
+                   (V2DI "") (V2SF "")
+                   (V4SF "") (V2DF "")])
+
+;; For scalar usage of vector/FP registers, narrowing
+(define_mode_attr vn2 [(QI "") (HI "b") (SI "h") (DI "s")
+                   (V8QI "") (V16QI "")
+                   (V4HI "") (V8HI "")
+                   (V2SI "") (V4SI  "")
+                   (V2DI "") (V2SF "")
+                   (V4SF "") (V2DF "")])
+
+;; For scalar usage of vector/FP registers, widening
+(define_mode_attr vw2 [(DI "") (QI "h") (HI "s") (SI "d")
+                   (V8QI "") (V16QI "")
+                   (V4HI "") (V8HI "")
+                   (V2SI "") (V4SI  "")
+                   (V2DI "") (V2SF "")
+                   (V4SF "") (V2DF "")])
+
+;; Map a floating point mode to the appropriate register name prefix
+(define_mode_attr s [(SF "s") (DF "d")])
+
+;; Give the length suffix letter for a sign- or zero-extension.
+(define_mode_attr size [(QI "b") (HI "h") (SI "w")])
+
+;; Give the number of bits in the mode
+(define_mode_attr sizen [(QI "8") (HI "16") (SI "32") (DI "64")])
+
+;; Give the ordinal of the MSB in the mode
+(define_mode_attr sizem1 [(QI "#7") (HI "#15") (SI "#31") (DI "#63")])
+
+;; Attribute to describe constants acceptable in logical operations
+(define_mode_attr lconst [(SI "K") (DI "L")])
+
+;; Map a mode to a specific constraint character.
+(define_mode_attr cmode [(QI "q") (HI "h") (SI "s") (DI "d")])
+
+(define_mode_attr Vtype [(V8QI "8b") (V16QI "16b")
+                        (V4HI "4h") (V8HI  "8h")
+                         (V2SI "2s") (V4SI  "4s")
+                         (DI   "1d") (DF    "1d")
+                         (V2DI "2d") (V2SF "2s")
+                        (V4SF "4s") (V2DF "2d")])
+
+(define_mode_attr Vmtype [(V8QI ".8b") (V16QI ".16b")
+                        (V4HI ".4h") (V8HI  ".8h")
+                        (V2SI ".2s") (V4SI  ".4s")
+                        (V2DI ".2d") (V2SF ".2s")
+                        (V4SF ".4s") (V2DF ".2d")
+                        (DI   "")    (SI   "")
+                        (HI   "")    (QI   "")
+                        (TI   "")])
+
+;; Register suffix narrowed modes for VQN.
+(define_mode_attr Vmntype [(V8HI ".8b") (V4SI ".4h")
+                          (V2DI ".2s")
+                          (DI   "")    (SI   "")
+                          (HI   "")])
+
+;; Mode-to-individual element type mapping.
+(define_mode_attr Vetype [(V8QI "b") (V16QI "b")
+                         (V4HI "h") (V8HI  "h")
+                          (V2SI "s") (V4SI  "s")
+                         (V2DI "d") (V2SF  "s")
+                         (V4SF "s") (V2DF  "d")
+                         (QI "b")   (HI "h")
+                         (SI "s")   (DI "d")])
+
+;; Mode-to-bitwise operation type mapping.
+(define_mode_attr Vbtype [(V8QI "8b")  (V16QI "16b")
+                         (V4HI "8b") (V8HI  "16b")
+                         (V2SI "8b") (V4SI  "16b")
+                         (V2DI "16b") (V2SF  "8b")
+                         (V4SF "16b") (V2DF  "16b")])
+
+;; Define element mode for each vector mode.
+(define_mode_attr VEL [(V8QI "QI") (V16QI "QI")
+                       (V4HI "HI") (V8HI "HI")
+                        (V2SI "SI") (V4SI "SI")
+                        (DI "DI")   (V2DI "DI")
+                        (V2SF "SF") (V4SF "SF")
+                        (V2DF "DF")
+                       (SI   "SI") (HI   "HI")
+                       (QI   "QI")])
+
+;; Define container mode for lane selection.
+(define_mode_attr VCON [(V8QI "V16QI") (V16QI "V16QI")
+                       (V4HI "V8HI") (V8HI "V8HI")
+                       (V2SI "V4SI") (V4SI "V4SI")
+                       (DI   "V2DI") (V2DI "V2DI")
+                       (V2SF "V2SF") (V4SF "V4SF")
+                       (V2DF "V2DF") (SI   "V4SI")
+                       (HI   "V8HI") (QI   "V16QI")])
+
+;; Half modes of all vector modes.
+(define_mode_attr VHALF [(V8QI "V4QI")  (V16QI "V8QI")
+                        (V4HI "V2HI")  (V8HI  "V4HI")
+                        (V2SI "SI")    (V4SI  "V2SI")
+                        (V2DI "DI")    (V2SF  "SF")
+                        (V4SF "V2SF")  (V2DF  "DF")])
+
+;; Double modes of vector modes.
+(define_mode_attr VDBL [(V8QI "V16QI") (V4HI "V8HI")
+                       (V2SI "V4SI")  (V2SF "V4SF")
+                       (SI   "V2SI")  (DI   "V2DI")
+                       (DF   "V2DF")])
+
+;; Double modes of vector modes (lower case).
+(define_mode_attr Vdbl [(V8QI "v16qi") (V4HI "v8hi")
+                       (V2SI "v4si")  (V2SF "v4sf")
+                       (SI   "v2si")  (DI   "v2di")])
+
+;; Narrowed modes for VDN.
+(define_mode_attr VNARROWD [(V4HI "V8QI") (V2SI "V4HI")
+                           (DI   "V2SI")])
+
+;; Narrowed double-modes for VQN (Used for XTN).
+(define_mode_attr VNARROWQ [(V8HI "V8QI") (V4SI "V4HI")
+                           (V2DI "V2SI")
+                           (DI   "SI")   (SI   "HI")
+                           (HI   "QI")])
+
+;; Narrowed quad-modes for VQN (Used for XTN2).
+(define_mode_attr VNARROWQ2 [(V8HI "V16QI") (V4SI "V8HI")
+                            (V2DI "V4SI")])
+
+;; Register suffix narrowed modes for VQN.
+(define_mode_attr Vntype [(V8HI "8b") (V4SI "4h")
+                         (V2DI "2s")])
+
+;; Register suffix narrowed modes for VQN.
+(define_mode_attr V2ntype [(V8HI "16b") (V4SI "8h")
+                          (V2DI "4s")])
+
+;; Widened modes of vector modes.
+(define_mode_attr VWIDE [(V8QI "V8HI") (V4HI "V4SI")
+                        (V2SI "V2DI") (V16QI "V8HI") 
+                        (V8HI "V4SI") (V4SI "V2DI")
+                        (HI "SI")     (SI "DI")])
+
+;; Widened mode register suffixes for VDW/VQW.
+(define_mode_attr Vwtype [(V8QI "8h") (V4HI "4s")
+                         (V2SI "2d") (V16QI "8h") 
+                         (V8HI "4s") (V4SI "2d")])
+
+;; Widened mode register suffixes for VDW/VQW.
+(define_mode_attr Vmwtype [(V8QI ".8h") (V4HI ".4s")
+                          (V2SI ".2d") (V16QI ".8h") 
+                          (V8HI ".4s") (V4SI ".2d")
+                          (SI   "")    (HI   "")])
+
+;; Lower part register suffixes for VQW.
+(define_mode_attr Vhalftype [(V16QI "8b") (V8HI "4h")
+                            (V4SI "2s")])
+
+;; Define corresponding core/FP element mode for each vector mode.
+(define_mode_attr vw   [(V8QI "w") (V16QI "w")
+                        (V4HI "w") (V8HI "w")
+                        (V2SI "w") (V4SI "w")
+                        (DI   "x") (V2DI "x")
+                        (V2SF "s") (V4SF "s")
+                        (V2DF "d")])
+
+;; Double vector types for ALLX.
+(define_mode_attr Vallxd [(QI "8b") (HI "4h") (SI "2s")])
+
+;; Mode of result of comparison operations.
+(define_mode_attr V_cmp_result [(V8QI "V8QI") (V16QI "V16QI")
+                               (V4HI "V4HI") (V8HI  "V8HI")
+                               (V2SI "V2SI") (V4SI  "V4SI")
+                               (V2SF "V2SI") (V4SF  "V4SI")
+                               (DI   "DI")   (V2DI  "V2DI")])
+
+;; Vm for lane instructions is restricted to FP_LO_REGS.
+(define_mode_attr vwx [(V4HI "x") (V8HI "x") (HI "x")
+                      (V2SI "w") (V4SI "w") (SI "w")])
+
+(define_mode_attr Vendreg [(OI "T") (CI "U") (XI "V")])
+
+(define_mode_attr nregs [(OI "2") (CI "3") (XI "4")])
+
+(define_mode_attr VRL2 [(V8QI "V32QI") (V4HI "V16HI")
+                       (V2SI "V8SI")  (V2SF "V8SF")
+                       (DI   "V4DI")  (DF   "V4DF")
+                       (V16QI "V32QI") (V8HI "V16HI")
+                       (V4SI "V8SI")  (V4SF "V8SF")
+                       (V2DI "V4DI")  (V2DF "V4DF")])
+
+(define_mode_attr VRL3 [(V8QI "V48QI") (V4HI "V24HI")
+                       (V2SI "V12SI")  (V2SF "V12SF")
+                       (DI   "V6DI")  (DF   "V6DF")
+                       (V16QI "V48QI") (V8HI "V24HI")
+                       (V4SI "V12SI")  (V4SF "V12SF")
+                       (V2DI "V6DI")  (V2DF "V6DF")])
+
+(define_mode_attr VRL4 [(V8QI "V64QI") (V4HI "V32HI")
+                       (V2SI "V16SI")  (V2SF "V16SF")
+                       (DI   "V8DI")  (DF   "V8DF")
+                       (V16QI "V64QI") (V8HI "V32HI")
+                       (V4SI "V16SI")  (V4SF "V16SF")
+                       (V2DI "V8DI")  (V2DF "V8DF")])
+
+(define_mode_attr VSTRUCT_DREG [(OI "TI") (CI "EI") (XI "OI")])
+
+;; -------------------------------------------------------------------
+;; Code Iterators
+;; -------------------------------------------------------------------
+
+;; This code iterator allows the various shifts supported on the core
+(define_code_iterator SHIFT [ashift ashiftrt lshiftrt rotatert])
+
+;; This code iterator allows the shifts supported in arithmetic instructions
+(define_code_iterator ASHIFT [ashift ashiftrt lshiftrt])
+
+;; Code iterator for logical operations
+(define_code_iterator LOGICAL [and ior xor])
+
+;; Code iterator for sign/zero extension
+(define_code_iterator ANY_EXTEND [sign_extend zero_extend])
+
+;; All division operations (signed/unsigned)
+(define_code_iterator ANY_DIV [div udiv])
+
+;; Code iterator for sign/zero extraction
+(define_code_iterator ANY_EXTRACT [sign_extract zero_extract])
+
+;; Code iterator for equality comparisons
+(define_code_iterator EQL [eq ne])
+
+;; Code iterator for less-than and greater/equal-to
+(define_code_iterator LTGE [lt ge])
+
+;; Iterator for __sync_<op> operations where the operation can be
+;; represented directly in RTL.  This is all of the sync operations bar
+;; nand.
+(define_code_iterator syncop [plus minus ior xor and])
+
+;; Iterator for integer conversions
+(define_code_iterator FIXUORS [fix unsigned_fix])
+
+;; Code iterator for variants of vector max and min.
+(define_code_iterator MAXMIN [smax smin umax umin])
+
+;; Code iterator for addition and subtraction.
+(define_code_iterator ADDSUB [plus minus])
+
+;; Code iterator for variants of vector saturating binary ops.
+(define_code_iterator BINQOPS [ss_plus us_plus ss_minus us_minus])
+
+;; Code iterator for variants of vector saturating unary ops.
+(define_code_iterator UNQOPS [ss_neg ss_abs])
+
+;; Code iterator for signed variants of vector saturating binary ops.
+(define_code_iterator SBINQOPS [ss_plus ss_minus])
+
+;; -------------------------------------------------------------------
+;; Code Attributes
+;; -------------------------------------------------------------------
+;; Map rtl objects to optab names
+(define_code_attr optab [(ashift "ashl")
+                        (ashiftrt "ashr")
+                        (lshiftrt "lshr")
+                        (rotatert "rotr")
+                        (sign_extend "extend")
+                        (zero_extend "zero_extend")
+                        (sign_extract "extv")
+                        (zero_extract "extzv")
+                        (and "and")
+                        (ior "ior")
+                        (xor "xor")
+                        (not "one_cmpl")
+                        (neg "neg")
+                        (plus "add")
+                        (minus "sub")
+                        (ss_plus "qadd")
+                        (us_plus "qadd")
+                        (ss_minus "qsub")
+                        (us_minus "qsub")
+                        (ss_neg "qneg")
+                        (ss_abs "qabs")
+                        (eq "eq")
+                        (ne "ne")
+                        (lt "lt")
+                        (ge "ge")])
+
+;; Optab prefix for sign/zero-extending operations
+(define_code_attr su_optab [(sign_extend "") (zero_extend "u")
+                           (div "") (udiv "u")
+                           (fix "") (unsigned_fix "u")
+                           (ss_plus "s") (us_plus "u")
+                           (ss_minus "s") (us_minus "u")])
+
+;; Similar for the instruction mnemonics
+(define_code_attr shift [(ashift "lsl") (ashiftrt "asr")
+                        (lshiftrt "lsr") (rotatert "ror")])
+
+;; Map shift operators onto underlying bit-field instructions
+(define_code_attr bfshift [(ashift "ubfiz") (ashiftrt "sbfx")
+                          (lshiftrt "ubfx") (rotatert "extr")])
+
+;; Logical operator instruction mnemonics
+(define_code_attr logical [(and "and") (ior "orr") (xor "eor")])
+
+;; Similar, but when not(op)
+(define_code_attr nlogical [(and "bic") (ior "orn") (xor "eon")])
+
+;; Sign- or zero-extending load
+(define_code_attr ldrxt [(sign_extend "ldrs") (zero_extend "ldr")])
+
+;; Sign- or zero-extending data-op
+(define_code_attr su [(sign_extend "s") (zero_extend "u")
+                     (sign_extract "s") (zero_extract "u")
+                     (fix "s") (unsigned_fix "u")
+                     (div "s") (udiv "u")])
+
+;; Emit cbz/cbnz depending on comparison type.
+(define_code_attr cbz [(eq "cbz") (ne "cbnz") (lt "cbnz") (ge "cbz")])
+
+;; Emit tbz/tbnz depending on comparison type.
+(define_code_attr tbz [(eq "tbz") (ne "tbnz") (lt "tbnz") (ge "tbz")])
+
+;; Max/min attributes.
+(define_code_attr maxmin [(smax "smax")
+                         (smin "smin")
+                         (umax "umax")
+                         (umin "umin")])
+
+;; MLA/MLS attributes.
+(define_code_attr as [(ss_plus "a") (ss_minus "s")])
+
+
+;; -------------------------------------------------------------------
+;; Int Iterators.
+;; -------------------------------------------------------------------
+(define_int_iterator MAXMINV [UNSPEC_UMAXV UNSPEC_UMINV
+                             UNSPEC_SMAXV UNSPEC_SMINV])
+
+(define_int_iterator FMAXMINV [UNSPEC_FMAXV UNSPEC_FMINV])
+
+(define_int_iterator HADDSUB [UNSPEC_SHADD UNSPEC_UHADD
+                             UNSPEC_SRHADD UNSPEC_URHADD
+                             UNSPEC_SHSUB UNSPEC_UHSUB
+                             UNSPEC_SRHSUB UNSPEC_URHSUB])
+
+
+(define_int_iterator ADDSUBHN [UNSPEC_ADDHN UNSPEC_RADDHN
+                              UNSPEC_SUBHN UNSPEC_RSUBHN])
+
+(define_int_iterator ADDSUBHN2 [UNSPEC_ADDHN2 UNSPEC_RADDHN2
+                               UNSPEC_SUBHN2 UNSPEC_RSUBHN2])
+
+(define_int_iterator FMAXMIN [UNSPEC_FMAX UNSPEC_FMIN])
+
+(define_int_iterator VQDMULH [UNSPEC_SQDMULH UNSPEC_SQRDMULH])
+
+(define_int_iterator USSUQADD [UNSPEC_SUQADD UNSPEC_USQADD])
+
+(define_int_iterator SUQMOVN [UNSPEC_SQXTN UNSPEC_UQXTN])
+
+(define_int_iterator VSHL [UNSPEC_SSHL UNSPEC_USHL
+                          UNSPEC_SRSHL UNSPEC_URSHL])
+
+(define_int_iterator VSHLL [UNSPEC_SSHLL UNSPEC_USHLL])
+
+(define_int_iterator VQSHL [UNSPEC_SQSHL UNSPEC_UQSHL
+                            UNSPEC_SQRSHL UNSPEC_UQRSHL])
+
+(define_int_iterator VSRA [UNSPEC_SSRA UNSPEC_USRA
+                            UNSPEC_SRSRA UNSPEC_URSRA])
+
+(define_int_iterator VSLRI [UNSPEC_SSLI UNSPEC_USLI
+                             UNSPEC_SSRI UNSPEC_USRI])
+
+
+(define_int_iterator VRSHR_N [UNSPEC_SRSHR UNSPEC_URSHR])
+
+(define_int_iterator VQSHL_N [UNSPEC_SQSHLU UNSPEC_SQSHL UNSPEC_UQSHL])
+
+(define_int_iterator VQSHRN_N [UNSPEC_SQSHRUN UNSPEC_SQRSHRUN
+                               UNSPEC_SQSHRN UNSPEC_UQSHRN
+                               UNSPEC_SQRSHRN UNSPEC_UQRSHRN])
+
+(define_int_iterator VCMP_S [UNSPEC_CMEQ UNSPEC_CMGE UNSPEC_CMGT
+                            UNSPEC_CMLE UNSPEC_CMLT])
+
+(define_int_iterator VCMP_U [UNSPEC_CMHS UNSPEC_CMHI UNSPEC_CMTST])
+
+
+;; -------------------------------------------------------------------
+;; Int Iterators Attributes.
+;; -------------------------------------------------------------------
+(define_int_attr  maxminv [(UNSPEC_UMAXV "umax")
+                          (UNSPEC_UMINV "umin")
+                          (UNSPEC_SMAXV "smax")
+                          (UNSPEC_SMINV "smin")])
+
+(define_int_attr  fmaxminv [(UNSPEC_FMAXV "max")
+                           (UNSPEC_FMINV "min")])
+
+(define_int_attr  fmaxmin [(UNSPEC_FMAX "fmax")
+                          (UNSPEC_FMIN "fmin")])
+
+(define_int_attr sur [(UNSPEC_SHADD "s") (UNSPEC_UHADD "u")
+                     (UNSPEC_SRHADD "sr") (UNSPEC_URHADD "ur")
+                     (UNSPEC_SHSUB "s") (UNSPEC_UHSUB "u")
+                     (UNSPEC_SRHSUB "sr") (UNSPEC_URHSUB "ur")
+                     (UNSPEC_ADDHN "") (UNSPEC_RADDHN "r")
+                     (UNSPEC_SUBHN "") (UNSPEC_RSUBHN "r")
+                     (UNSPEC_ADDHN2 "") (UNSPEC_RADDHN2 "r")
+                     (UNSPEC_SUBHN2 "") (UNSPEC_RSUBHN2 "r")
+                     (UNSPEC_SQXTN "s") (UNSPEC_UQXTN "u")
+                     (UNSPEC_USQADD "us") (UNSPEC_SUQADD "su")
+                     (UNSPEC_SSLI  "s") (UNSPEC_USLI  "u")
+                     (UNSPEC_SSRI  "s") (UNSPEC_USRI  "u")
+                     (UNSPEC_USRA  "u") (UNSPEC_SSRA  "s")
+                     (UNSPEC_URSRA  "ur") (UNSPEC_SRSRA  "sr")
+                     (UNSPEC_URSHR  "ur") (UNSPEC_SRSHR  "sr")
+                     (UNSPEC_SQSHLU "s") (UNSPEC_SQSHL   "s")
+                     (UNSPEC_UQSHL  "u")
+                     (UNSPEC_SQSHRUN "s") (UNSPEC_SQRSHRUN "s")
+                      (UNSPEC_SQSHRN "s")  (UNSPEC_UQSHRN "u")
+                      (UNSPEC_SQRSHRN "s") (UNSPEC_UQRSHRN "u")
+                     (UNSPEC_USHL  "u")   (UNSPEC_SSHL  "s")
+                     (UNSPEC_USHLL  "u")  (UNSPEC_SSHLL "s")
+                     (UNSPEC_URSHL  "ur") (UNSPEC_SRSHL  "sr")
+                     (UNSPEC_UQRSHL  "u") (UNSPEC_SQRSHL  "s")
+])
+
+(define_int_attr r [(UNSPEC_SQDMULH "") (UNSPEC_SQRDMULH "r")
+                   (UNSPEC_SQSHRUN "") (UNSPEC_SQRSHRUN "r")
+                    (UNSPEC_SQSHRN "")  (UNSPEC_UQSHRN "")
+                    (UNSPEC_SQRSHRN "r") (UNSPEC_UQRSHRN "r")
+                    (UNSPEC_SQSHL   "")  (UNSPEC_UQSHL  "")
+                    (UNSPEC_SQRSHL   "r")(UNSPEC_UQRSHL  "r")
+])
+
+(define_int_attr lr [(UNSPEC_SSLI  "l") (UNSPEC_USLI  "l")
+                    (UNSPEC_SSRI  "r") (UNSPEC_USRI  "r")])
+
+(define_int_attr u [(UNSPEC_SQSHLU "u") (UNSPEC_SQSHL "") (UNSPEC_UQSHL "")
+                   (UNSPEC_SQSHRUN "u") (UNSPEC_SQRSHRUN "u")
+                    (UNSPEC_SQSHRN "")  (UNSPEC_UQSHRN "")
+                    (UNSPEC_SQRSHRN "") (UNSPEC_UQRSHRN "")])
+
+(define_int_attr addsub [(UNSPEC_SHADD "add")
+                        (UNSPEC_UHADD "add")
+                        (UNSPEC_SRHADD "add")
+                        (UNSPEC_URHADD "add")
+                        (UNSPEC_SHSUB "sub")
+                        (UNSPEC_UHSUB "sub")
+                        (UNSPEC_SRHSUB "sub")
+                        (UNSPEC_URHSUB "sub")
+                        (UNSPEC_ADDHN "add")
+                        (UNSPEC_SUBHN "sub")
+                        (UNSPEC_RADDHN "add")
+                        (UNSPEC_RSUBHN "sub")
+                        (UNSPEC_ADDHN2 "add")
+                        (UNSPEC_SUBHN2 "sub")
+                        (UNSPEC_RADDHN2 "add")
+                        (UNSPEC_RSUBHN2 "sub")])
+
+(define_int_attr cmp [(UNSPEC_CMGE "ge") (UNSPEC_CMGT "gt")
+                     (UNSPEC_CMLE "le") (UNSPEC_CMLT "lt")
+                      (UNSPEC_CMEQ "eq")
+                     (UNSPEC_CMHS "hs") (UNSPEC_CMHI "hi")
+                     (UNSPEC_CMTST "tst")])
+
+(define_int_attr offsetlr [(UNSPEC_SSLI        "1") (UNSPEC_USLI "1")
+                          (UNSPEC_SSRI "0") (UNSPEC_USRI "0")])
+
diff --git a/gcc/config/aarch64/large.md b/gcc/config/aarch64/large.md
new file mode 100644 (file)
index 0000000..1e73dc3
--- /dev/null
@@ -0,0 +1,312 @@
+;; Copyright (C) 2012 Free Software Foundation, Inc.
+;;
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; In the absence of any ARMv8-A implementations, two example pipeline
+;; descriptions derived from ARM's most recent ARMv7-A cores (Cortex-A7
+;; and Cortex-A15) are included as a temporary measure.
+
+;; Example pipeline description for an example 'large' core
+;; implementing AArch64
+
+;;-------------------------------------------------------
+;; General Description
+;;-------------------------------------------------------
+
+(define_automaton "large_cpu")
+
+;; The core is modelled as a triple issue pipeline that has
+;; the following dispatch units.
+;; 1. Two pipelines for simple integer operations: int1, int2
+;; 2. Two pipelines for SIMD and FP data-processing operations: fpsimd1, fpsimd2
+;; 3. One pipeline for branch operations: br
+;; 4. One pipeline for integer multiply and divide operations: multdiv
+;; 5. Two pipelines for load and store operations: ls1, ls2
+;;
+;; We can issue into three pipelines per cycle.
+;;
+;; We assume that, where we have unit pairs, xxx1 is always filled before xxx2.
+
+;;-------------------------------------------------------
+;; CPU Units and Reservations
+;;-------------------------------------------------------
+
+;; The three issue units
+(define_cpu_unit "large_cpu_unit_i1, large_cpu_unit_i2, large_cpu_unit_i3" "large_cpu")
+
+(define_reservation "large_cpu_resv_i1"
+                   "(large_cpu_unit_i1 | large_cpu_unit_i2 | large_cpu_unit_i3)")
+
+(define_reservation "large_cpu_resv_i2"
+                   "((large_cpu_unit_i1 + large_cpu_unit_i2) | (large_cpu_unit_i2 + large_cpu_unit_i3))")
+
+(define_reservation "large_cpu_resv_i3"
+                   "(large_cpu_unit_i1 + large_cpu_unit_i2 + large_cpu_unit_i3)")
+
+(final_presence_set "large_cpu_unit_i2" "large_cpu_unit_i1")
+(final_presence_set "large_cpu_unit_i3" "large_cpu_unit_i2")
+
+;; The main dispatch units
+(define_cpu_unit "large_cpu_unit_int1, large_cpu_unit_int2" "large_cpu")
+(define_cpu_unit "large_cpu_unit_fpsimd1, large_cpu_unit_fpsimd2" "large_cpu")
+(define_cpu_unit "large_cpu_unit_ls1, large_cpu_unit_ls2" "large_cpu")
+(define_cpu_unit "large_cpu_unit_br" "large_cpu")
+(define_cpu_unit "large_cpu_unit_multdiv" "large_cpu")
+
+(define_reservation "large_cpu_resv_ls" "(large_cpu_unit_ls1 | large_cpu_unit_ls2)")
+
+;; The extended load-store pipeline
+(define_cpu_unit "large_cpu_unit_load, large_cpu_unit_store" "large_cpu")
+
+;; The extended ALU pipeline
+(define_cpu_unit "large_cpu_unit_int1_alu, large_cpu_unit_int2_alu" "large_cpu")
+(define_cpu_unit "large_cpu_unit_int1_shf, large_cpu_unit_int2_shf" "large_cpu")
+(define_cpu_unit "large_cpu_unit_int1_sat, large_cpu_unit_int2_sat" "large_cpu")
+
+
+;;-------------------------------------------------------
+;; Simple ALU Instructions
+;;-------------------------------------------------------
+
+;; Simple ALU operations without shift
+(define_insn_reservation "large_cpu_alu" 2
+  (and (eq_attr "tune" "large") (eq_attr "v8type" "adc,alu,alu_ext"))
+  "large_cpu_resv_i1, \
+   (large_cpu_unit_int1, large_cpu_unit_int1_alu) |\
+     (large_cpu_unit_int2, large_cpu_unit_int2_alu)")
+
+(define_insn_reservation "large_cpu_logic" 2
+  (and (eq_attr "tune" "large") (eq_attr "v8type" "logic,logic_imm"))
+  "large_cpu_resv_i1, \
+   (large_cpu_unit_int1, large_cpu_unit_int1_alu) |\
+     (large_cpu_unit_int2, large_cpu_unit_int2_alu)")
+
+(define_insn_reservation "large_cpu_shift" 2
+  (and (eq_attr "tune" "large") (eq_attr "v8type" "shift,shift_imm"))
+  "large_cpu_resv_i1, \
+   (large_cpu_unit_int1, large_cpu_unit_int1_shf) |\
+     (large_cpu_unit_int2, large_cpu_unit_int2_shf)")
+
+;; Simple ALU operations with immediate shift
+(define_insn_reservation "large_cpu_alu_shift" 3
+  (and (eq_attr "tune" "large") (eq_attr "v8type" "alu_shift"))
+  "large_cpu_resv_i1, \
+   (large_cpu_unit_int1,
+     large_cpu_unit_int1 + large_cpu_unit_int1_shf, large_cpu_unit_int1_alu) | \
+   (large_cpu_unit_int2,
+     large_cpu_unit_int2 + large_cpu_unit_int2_shf, large_cpu_unit_int2_alu)")
+
+(define_insn_reservation "large_cpu_logic_shift" 3
+  (and (eq_attr "tune" "large") (eq_attr "v8type" "logic_shift"))
+  "large_cpu_resv_i1, \
+   (large_cpu_unit_int1, large_cpu_unit_int1_alu) |\
+     (large_cpu_unit_int2, large_cpu_unit_int2_alu)")
+
+
+;;-------------------------------------------------------
+;; Multiplication/Division
+;;-------------------------------------------------------
+
+;; Simple multiplication
+(define_insn_reservation "large_cpu_mult_single" 3
+  (and (eq_attr "tune" "large")
+       (and (eq_attr "v8type" "mult,madd") (eq_attr "mode" "SI")))
+  "large_cpu_resv_i1, large_cpu_unit_multdiv")
+
+(define_insn_reservation "large_cpu_mult_double" 4
+  (and (eq_attr "tune" "large")
+       (and (eq_attr "v8type" "mult,madd") (eq_attr "mode" "DI")))
+  "large_cpu_resv_i1, large_cpu_unit_multdiv")
+
+;; 64-bit multiplication
+(define_insn_reservation "large_cpu_mull" 4
+  (and (eq_attr "tune" "large") (eq_attr "v8type" "mull,mulh,maddl"))
+  "large_cpu_resv_i1, large_cpu_unit_multdiv * 2")
+
+;; Division
+(define_insn_reservation "large_cpu_udiv_single" 9
+  (and (eq_attr "tune" "large")
+       (and (eq_attr "v8type" "udiv") (eq_attr "mode" "SI")))
+  "large_cpu_resv_i1, large_cpu_unit_multdiv")
+
+(define_insn_reservation "large_cpu_udiv_double" 18
+  (and (eq_attr "tune" "large")
+       (and (eq_attr "v8type" "udiv") (eq_attr "mode" "DI")))
+  "large_cpu_resv_i1, large_cpu_unit_multdiv")
+
+(define_insn_reservation "large_cpu_sdiv_single" 10
+  (and (eq_attr "tune" "large")
+       (and (eq_attr "v8type" "sdiv") (eq_attr "mode" "SI")))
+  "large_cpu_resv_i1, large_cpu_unit_multdiv")
+
+(define_insn_reservation "large_cpu_sdiv_double" 20
+  (and (eq_attr "tune" "large")
+       (and (eq_attr "v8type" "sdiv") (eq_attr "mode" "DI")))
+  "large_cpu_resv_i1, large_cpu_unit_multdiv")
+
+
+;;-------------------------------------------------------
+;; Branches
+;;-------------------------------------------------------
+
+;; Branches take one issue slot.
+;; They have no latency, as there is no result.
+(define_insn_reservation "large_cpu_branch" 0
+  (and (eq_attr "tune" "large") (eq_attr "v8type" "branch"))
+  "large_cpu_resv_i1, large_cpu_unit_br")
+
+
+;; Calls take up all issue slots and form a block in the pipeline.
+;; The result, however, is available the next cycle.
+;; Adding new units requires this reservation to be updated.
+(define_insn_reservation "large_cpu_call" 1
+  (and (eq_attr "tune" "large") (eq_attr "v8type" "call"))
+  "large_cpu_resv_i3 | large_cpu_resv_i2, \
+   large_cpu_unit_int1 + large_cpu_unit_int2 + large_cpu_unit_br + \
+     large_cpu_unit_multdiv + large_cpu_unit_fpsimd1 + large_cpu_unit_fpsimd2 + \
+     large_cpu_unit_ls1 + large_cpu_unit_ls2,\
+   large_cpu_unit_int1_alu + large_cpu_unit_int1_shf + large_cpu_unit_int1_sat + \
+     large_cpu_unit_int2_alu + large_cpu_unit_int2_shf + \
+     large_cpu_unit_int2_sat + large_cpu_unit_load + large_cpu_unit_store")
+
+
+;;-------------------------------------------------------
+;; Load/Store Instructions
+;;-------------------------------------------------------
+
+;; Loads of up to two words.
+(define_insn_reservation "large_cpu_load1" 4
+  (and (eq_attr "tune" "large") (eq_attr "v8type" "load_acq,load1,load2"))
+  "large_cpu_resv_i1, large_cpu_resv_ls, large_cpu_unit_load, nothing")
+
+;; Stores of up to two words.
+(define_insn_reservation "large_cpu_store1" 0
+  (and (eq_attr "tune" "large") (eq_attr "v8type" "store_rel,store1,store2"))
+  "large_cpu_resv_i1, large_cpu_resv_ls, large_cpu_unit_store")
+
+
+;;-------------------------------------------------------
+;; Floating-point arithmetic.
+;;-------------------------------------------------------
+
+(define_insn_reservation "large_cpu_fpalu" 4
+  (and (eq_attr "tune" "large")
+       (eq_attr "v8type" "ffarith,fadd,fccmp,fcvt,fcmp"))
+  "large_cpu_resv_i1 + large_cpu_unit_fpsimd1")
+
+(define_insn_reservation "large_cpu_fconst" 3
+  (and (eq_attr "tune" "large")
+       (eq_attr "v8type" "fconst"))
+  "large_cpu_resv_i1 + large_cpu_unit_fpsimd1")
+
+(define_insn_reservation "large_cpu_fpmuls" 4
+  (and (eq_attr "tune" "large")
+       (and (eq_attr "v8type" "fmul,fmadd") (eq_attr "mode" "SF")))
+  "large_cpu_resv_i1 + large_cpu_unit_fpsimd1")
+
+(define_insn_reservation "large_cpu_fpmuld" 7
+  (and (eq_attr "tune" "large")
+       (and (eq_attr "v8type" "fmul,fmadd") (eq_attr "mode" "DF")))
+  "large_cpu_resv_i1 + large_cpu_unit_fpsimd1, large_cpu_unit_fpsimd1 * 2,\
+   large_cpu_resv_i1 + large_cpu_unit_fpsimd1")
+
+
+;;-------------------------------------------------------
+;; Floating-point Division
+;;-------------------------------------------------------
+
+;; A single-precision divide takes 14 cycles to complete; this includes
+;; the time taken by the special instruction that collects the result as
+;; it travels down the multiply pipeline.
+
+(define_insn_reservation "large_cpu_fdivs" 14
+  (and (eq_attr "tune" "large")
+       (and (eq_attr "v8type" "fdiv,fsqrt") (eq_attr "mode" "SF")))
+  "large_cpu_resv_i1, large_cpu_unit_fpsimd1 * 13")
+
+(define_insn_reservation "large_cpu_fdivd" 29
+  (and (eq_attr "tune" "large")
+       (and (eq_attr "v8type" "fdiv,fsqrt") (eq_attr "mode" "DF")))
+  "large_cpu_resv_i1, large_cpu_unit_fpsimd1 * 28")
+
+
+
+;;-------------------------------------------------------
+;; Floating-point Transfers
+;;-------------------------------------------------------
+
+(define_insn_reservation "large_cpu_i2f" 4
+  (and (eq_attr "tune" "large")
+       (eq_attr "v8type" "fmovi2f"))
+  "large_cpu_resv_i1")
+
+(define_insn_reservation "large_cpu_f2i" 2
+  (and (eq_attr "tune" "large")
+       (eq_attr "v8type" "fmovf2i"))
+  "large_cpu_resv_i1")
+
+
+;;-------------------------------------------------------
+;; Floating-point Load/Store
+;;-------------------------------------------------------
+
+(define_insn_reservation "large_cpu_floads" 4
+  (and (eq_attr "tune" "large")
+       (and (eq_attr "v8type" "fpsimd_load,fpsimd_load2") (eq_attr "mode" "SF")))
+  "large_cpu_resv_i1")
+
+(define_insn_reservation "large_cpu_floadd" 5
+  (and (eq_attr "tune" "large")
+       (and (eq_attr "v8type" "fpsimd_load,fpsimd_load2") (eq_attr "mode" "DF")))
+  "large_cpu_resv_i1 + large_cpu_unit_br, large_cpu_resv_i1")
+
+(define_insn_reservation "large_cpu_fstores" 0
+  (and (eq_attr "tune" "large")
+       (and (eq_attr "v8type" "fpsimd_store,fpsimd_store2") (eq_attr "mode" "SF")))
+  "large_cpu_resv_i1")
+
+(define_insn_reservation "large_cpu_fstored" 0
+  (and (eq_attr "tune" "large")
+       (and (eq_attr "v8type" "fpsimd_store,fpsimd_store2") (eq_attr "mode" "DF")))
+  "large_cpu_resv_i1 + large_cpu_unit_br, large_cpu_resv_i1")
+
+
+;;-------------------------------------------------------
+;; Bypasses
+;;-------------------------------------------------------
+
+(define_bypass 1 "large_cpu_alu, large_cpu_logic, large_cpu_shift"
+  "large_cpu_alu, large_cpu_alu_shift, large_cpu_logic, large_cpu_logic_shift, large_cpu_shift")
+
+(define_bypass 2 "large_cpu_alu_shift, large_cpu_logic_shift"
+  "large_cpu_alu, large_cpu_alu_shift, large_cpu_logic, large_cpu_logic_shift, large_cpu_shift")
+
+(define_bypass 1 "large_cpu_alu, large_cpu_logic, large_cpu_shift" "large_cpu_load1")
+
+(define_bypass 2 "large_cpu_alu_shift, large_cpu_logic_shift" "large_cpu_load1")
+
+(define_bypass 2 "large_cpu_floads"
+                 "large_cpu_fpalu, large_cpu_fpmuld,\
+                 large_cpu_fdivs, large_cpu_fdivd,\
+                 large_cpu_f2i")
+
+(define_bypass 3 "large_cpu_floadd"
+                 "large_cpu_fpalu, large_cpu_fpmuld,\
+                 large_cpu_fdivs, large_cpu_fdivd,\
+                 large_cpu_f2i")
diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
new file mode 100644 (file)
index 0000000..6f79039
--- /dev/null
@@ -0,0 +1,297 @@
+;; Machine description for AArch64 architecture.
+;; Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_special_predicate "cc_register"
+  (and (match_code "reg")
+       (and (match_test "REGNO (op) == CC_REGNUM")
+           (ior (match_test "mode == GET_MODE (op)")
+                (match_test "mode == VOIDmode
+                             && GET_MODE_CLASS (GET_MODE (op)) == MODE_CC"))))
+)
+
+(define_predicate "aarch64_reg_or_zero"
+  (and (match_code "reg,subreg,const_int")
+       (ior (match_operand 0 "register_operand")
+           (match_test "op == const0_rtx"))))
+
+(define_predicate "aarch64_reg_zero_or_m1"
+  (and (match_code "reg,subreg,const_int")
+       (ior (match_operand 0 "register_operand")
+           (ior (match_test "op == const0_rtx")
+                (match_test "op == constm1_rtx")))))
+
+(define_predicate "aarch64_fp_compare_operand"
+  (ior (match_operand 0 "register_operand")
+       (and (match_code "const_double")
+           (match_test "aarch64_const_double_zero_rtx_p (op)"))))
+
+(define_predicate "aarch64_plus_immediate"
+  (and (match_code "const_int")
+       (ior (match_test "aarch64_uimm12_shift (INTVAL (op))")
+           (match_test "aarch64_uimm12_shift (-INTVAL (op))"))))
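+;; Illustration (not exhaustive): 4095 and 0x123000 (0x123 << 12) satisfy
+;; this predicate, as do their negations, which can be handled by the
+;; SUB form of the instruction.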
+
+(define_predicate "aarch64_plus_operand"
+  (ior (match_operand 0 "register_operand")
+       (match_operand 0 "aarch64_plus_immediate")))
+
+(define_predicate "aarch64_pluslong_immediate"
+  (and (match_code "const_int")
+       (match_test "(INTVAL (op) < 0xffffff && INTVAL (op) > -0xffffff)")))
+
+(define_predicate "aarch64_pluslong_operand"
+  (ior (match_operand 0 "register_operand")
+       (match_operand 0 "aarch64_pluslong_immediate")))
+
+(define_predicate "aarch64_logical_immediate"
+  (and (match_code "const_int")
+       (match_test "aarch64_bitmask_imm (INTVAL (op), mode)")))
+
+(define_predicate "aarch64_logical_operand"
+  (ior (match_operand 0 "register_operand")
+       (match_operand 0 "aarch64_logical_immediate")))
+
+(define_predicate "aarch64_shift_imm_si"
+  (and (match_code "const_int")
+       (match_test "(unsigned HOST_WIDE_INT) INTVAL (op) < 32")))
+
+(define_predicate "aarch64_shift_imm_di"
+  (and (match_code "const_int")
+       (match_test "(unsigned HOST_WIDE_INT) INTVAL (op) < 64")))
+
+(define_predicate "aarch64_reg_or_shift_imm_si"
+  (ior (match_operand 0 "register_operand")
+       (match_operand 0 "aarch64_shift_imm_si")))
+
+(define_predicate "aarch64_reg_or_shift_imm_di"
+  (ior (match_operand 0 "register_operand")
+       (match_operand 0 "aarch64_shift_imm_di")))
+
+;; The imm3 field is a 3-bit field that only accepts immediates in the
+;; range 0..4.
+(define_predicate "aarch64_imm3"
+  (and (match_code "const_int")
+       (match_test "(unsigned HOST_WIDE_INT) INTVAL (op) <= 4")))
+
+(define_predicate "aarch64_pwr_imm3"
+  (and (match_code "const_int")
+       (match_test "INTVAL (op) != 0
+                   && (unsigned) exact_log2 (INTVAL (op)) <= 4")))
+
+(define_predicate "aarch64_pwr_2_si"
+  (and (match_code "const_int")
+       (match_test "INTVAL (op) != 0
+                   && (unsigned) exact_log2 (INTVAL (op)) < 32")))
+
+(define_predicate "aarch64_pwr_2_di"
+  (and (match_code "const_int")
+       (match_test "INTVAL (op) != 0
+                   && (unsigned) exact_log2 (INTVAL (op)) < 64")))
+
+(define_predicate "aarch64_mem_pair_operand"
+  (and (match_code "mem")
+       (match_test "aarch64_legitimate_address_p (mode, XEXP (op, 0), PARALLEL,
+                                              0)")))
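+;; Hedged note: passing PARALLEL as the outer code is presumably how the
+;; backend tells aarch64_legitimate_address_p that the address feeds a
+;; load/store-pair (LDP/STP) pattern, which accepts a narrower immediate
+;; offset range than a single load or store.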
+
+(define_predicate "aarch64_const_address"
+  (and (match_code "symbol_ref")
+       (match_test "mode == DImode && CONSTANT_ADDRESS_P (op)")))
+
+(define_predicate "aarch64_valid_symref"
+  (match_code "const, symbol_ref, label_ref")
+{
+  enum aarch64_symbol_type symbol_type;
+  return (aarch64_symbolic_constant_p (op, SYMBOL_CONTEXT_ADR, &symbol_type)
+        && symbol_type != SYMBOL_FORCE_TO_MEM);
+})
+
+(define_predicate "aarch64_tls_ie_symref"
+  (match_code "const, symbol_ref, label_ref")
+{
+  switch (GET_CODE (op))
+    {
+    case CONST:
+      op = XEXP (op, 0);
+      if (GET_CODE (op) != PLUS
+         || GET_CODE (XEXP (op, 0)) != SYMBOL_REF
+         || GET_CODE (XEXP (op, 1)) != CONST_INT)
+       return false;
+      op = XEXP (op, 0);
+
+    case SYMBOL_REF:
+      return SYMBOL_REF_TLS_MODEL (op) == TLS_MODEL_INITIAL_EXEC;
+
+    default:
+      gcc_unreachable ();
+    }
+})
+
+(define_predicate "aarch64_tls_le_symref"
+  (match_code "const, symbol_ref, label_ref")
+{
+  switch (GET_CODE (op))
+    {
+    case CONST:
+      op = XEXP (op, 0);
+      if (GET_CODE (op) != PLUS
+         || GET_CODE (XEXP (op, 0)) != SYMBOL_REF
+         || GET_CODE (XEXP (op, 1)) != CONST_INT)
+       return false;
+      op = XEXP (op, 0);
+
+    case SYMBOL_REF:
+      return SYMBOL_REF_TLS_MODEL (op) == TLS_MODEL_LOCAL_EXEC;
+
+    default:
+      gcc_unreachable ();
+    }
+})
+
+(define_predicate "aarch64_mov_operand"
+  (and (match_code "reg,subreg,mem,const_int,symbol_ref,high")
+       (ior (match_operand 0 "register_operand")
+           (ior (match_operand 0 "memory_operand")
+                (ior (match_test "GET_CODE (op) == HIGH
+                                  && aarch64_valid_symref (XEXP (op, 0),
+                                                           GET_MODE (XEXP (op, 0)))")
+                     (ior (match_test "CONST_INT_P (op)
+                                       && aarch64_move_imm (INTVAL (op), mode)")
+                          (match_test "aarch64_const_address (op, mode)")))))))
+
+(define_predicate "aarch64_movti_operand"
+  (and (match_code "reg,subreg,mem,const_int")
+       (ior (match_operand 0 "register_operand")
+           (ior (match_operand 0 "memory_operand")
+                (match_operand 0 "const_int_operand")))))
+
+(define_predicate "aarch64_reg_or_imm"
+  (and (match_code "reg,subreg,const_int")
+       (ior (match_operand 0 "register_operand")
+           (match_operand 0 "const_int_operand"))))
+
+;; True for integer comparisons and for FP comparisons other than LTGT or UNEQ.
+(define_special_predicate "aarch64_comparison_operator"
+  (match_code "eq,ne,le,lt,ge,gt,geu,gtu,leu,ltu,unordered,ordered,unlt,unle,unge,ungt"))
+
+;; True if the operand is a memory reference suitable for a load/store exclusive.
+(define_predicate "aarch64_sync_memory_operand"
+  (and (match_operand 0 "memory_operand")
+       (match_code "reg" "0")))
+
+;; Predicates for parallel expanders based on mode.
+(define_special_predicate "vect_par_cnst_hi_half"
+  (match_code "parallel")
+{
+  HOST_WIDE_INT count = XVECLEN (op, 0);
+  int nunits = GET_MODE_NUNITS (mode);
+  int i;
+
+  if (count < 1
+      || count != nunits / 2)
+    return false;
+  if (!VECTOR_MODE_P (mode))
+    return false;
+
+  for (i = 0; i < count; i++)
+   {
+     rtx elt = XVECEXP (op, 0, i);
+     int val;
+
+     if (GET_CODE (elt) != CONST_INT)
+       return false;
+
+     val = INTVAL (elt);
+     if (val != (nunits / 2) + i)
+       return false;
+   }
+  return true;
+})
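+;; Worked example: in V4SImode, nunits is 4, so the only selector
+;; accepted here is (parallel [(const_int 2) (const_int 3)]).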
+
+(define_special_predicate "vect_par_cnst_lo_half"
+  (match_code "parallel")
+{
+  HOST_WIDE_INT count = XVECLEN (op, 0);
+  int nunits = GET_MODE_NUNITS (mode);
+  int i;
+
+  if (count < 1
+      || count != nunits / 2)
+    return false;
+
+  if (!VECTOR_MODE_P (mode))
+    return false;
+
+  for (i = 0; i < count; i++)
+   {
+     rtx elt = XVECEXP (op, 0, i);
+     int val;
+
+     if (GET_CODE (elt) != CONST_INT)
+       return false;
+
+     val = INTVAL (elt);
+     if (val != i)
+       return false;
+   }
+  return true;
+})
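+;; Worked example: in V4SImode the selector accepted here is
+;; (parallel [(const_int 0) (const_int 1)]).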
+
+
+(define_special_predicate "aarch64_simd_lshift_imm"
+  (match_code "const_vector")
+{
+  return aarch64_simd_shift_imm_p (op, mode, true);
+})
+
+(define_special_predicate "aarch64_simd_rshift_imm"
+  (match_code "const_vector")
+{
+  return aarch64_simd_shift_imm_p (op, mode, false);
+})
+
+(define_predicate "aarch64_simd_reg_or_zero"
+  (and (match_code "reg,subreg,const_int,const_vector")
+       (ior (match_operand 0 "register_operand")
+           (ior (match_test "op == const0_rtx")
+                (match_test "aarch64_simd_imm_zero_p (op, mode)")))))
+
+(define_predicate "aarch64_simd_struct_operand"
+  (and (match_code "mem")
+       (match_test "TARGET_SIMD && aarch64_simd_mem_operand_p (op)")))
+
+;; Like general_operand but allow only valid SIMD addressing modes.
+(define_predicate "aarch64_simd_general_operand"
+  (and (match_operand 0 "general_operand")
+       (match_test "!MEM_P (op)
+                   || GET_CODE (XEXP (op, 0)) == POST_INC
+                   || GET_CODE (XEXP (op, 0)) == REG")))
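+;; Hedged rationale: the SIMD load/store forms used with these operands
+;; are assumed to address memory only through a base register, optionally
+;; with post-increment, so other addressing modes are rejected.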
+
+;; Like nonimmediate_operand but allow only valid SIMD addressing modes.
+(define_predicate "aarch64_simd_nonimmediate_operand"
+  (and (match_operand 0 "nonimmediate_operand")
+       (match_test "!MEM_P (op)
+                   || GET_CODE (XEXP (op, 0)) == POST_INC
+                   || GET_CODE (XEXP (op, 0)) == REG")))
+
+(define_special_predicate "aarch64_simd_imm_zero"
+  (match_code "const_vector")
+{
+  return aarch64_simd_imm_zero_p (op, mode);
+})
diff --git a/gcc/config/aarch64/small.md b/gcc/config/aarch64/small.md
new file mode 100644 (file)
index 0000000..8f70ca9
--- /dev/null
@@ -0,0 +1,287 @@
+;; Copyright (C) 2012 Free Software Foundation, Inc.
+;;
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; In the absence of any ARMv8-A implementations, two example pipeline
+;; descriptions derived from ARM's most recent ARMv7-A cores (Cortex-A7
+;; and Cortex-A15) are included as a temporary measure.
+
+;; Example pipeline description for an example 'small' core
+;; implementing AArch64
+
+;;-------------------------------------------------------
+;; General Description
+;;-------------------------------------------------------
+
+(define_automaton "small_cpu")
+
+;; The core is modelled as a single issue pipeline with the following
+;; dispatch units.
+;; 1. One pipeline for simple instructions.
+;; 2. One pipeline for branch instructions.
+;;
+;; There are five pipeline stages.
+;; The decode/issue stages operate the same for all instructions.
+;; Instructions always advance one stage per cycle in order.
+;; Only branch instructions may dual-issue with other instructions, except
+;; when those instructions take multiple cycles to issue.
+
+
+;;-------------------------------------------------------
+;; CPU Units and Reservations
+;;-------------------------------------------------------
+
+(define_cpu_unit "small_cpu_unit_i" "small_cpu")
+(define_cpu_unit "small_cpu_unit_br" "small_cpu")
+
+;; Pseudo-unit for blocking the multiply pipeline when a double-precision
+;; multiply is in progress.
+(define_cpu_unit "small_cpu_unit_fpmul_pipe" "small_cpu")
+
+;; The floating-point add pipeline, used to model the usage
+;; of the add pipeline by fp alu instructions.
+(define_cpu_unit "small_cpu_unit_fpadd_pipe" "small_cpu")
+
+;; Floating-point division pipeline (long latency, out-of-order completion).
+(define_cpu_unit "small_cpu_unit_fpdiv" "small_cpu")
+
+
+;;-------------------------------------------------------
+;; Simple ALU Instructions
+;;-------------------------------------------------------
+
+;; Simple ALU operations without shift
+(define_insn_reservation "small_cpu_alu" 2
+  (and (eq_attr "tune" "small")
+       (eq_attr "v8type" "adc,alu,alu_ext"))
+  "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_logic" 2
+  (and (eq_attr "tune" "small")
+       (eq_attr "v8type" "logic,logic_imm"))
+  "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_shift" 2
+  (and (eq_attr "tune" "small")
+       (eq_attr "v8type" "shift,shift_imm"))
+  "small_cpu_unit_i")
+
+;; Simple ALU operations with immediate shift
+(define_insn_reservation "small_cpu_alu_shift" 2
+  (and (eq_attr "tune" "small")
+       (eq_attr "v8type" "alu_shift"))
+  "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_logic_shift" 2
+  (and (eq_attr "tune" "small")
+       (eq_attr "v8type" "logic_shift"))
+  "small_cpu_unit_i")
+
+
+;;-------------------------------------------------------
+;; Multiplication/Division
+;;-------------------------------------------------------
+
+;; Simple multiplication
+(define_insn_reservation "small_cpu_mult_single" 2
+  (and (eq_attr "tune" "small")
+       (and (eq_attr "v8type" "mult,madd") (eq_attr "mode" "SI")))
+  "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_mult_double" 3
+  (and (eq_attr "tune" "small")
+       (and (eq_attr "v8type" "mult,madd") (eq_attr "mode" "DI")))
+  "small_cpu_unit_i")
+
+;; 64-bit multiplication
+(define_insn_reservation "small_cpu_mull" 3
+  (and (eq_attr "tune" "small") (eq_attr "v8type" "mull,mulh,maddl"))
+  "small_cpu_unit_i * 2")
+
+;; Division
+(define_insn_reservation "small_cpu_udiv_single" 5
+  (and (eq_attr "tune" "small")
+       (and (eq_attr "v8type" "udiv") (eq_attr "mode" "SI")))
+  "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_udiv_double" 10
+  (and (eq_attr "tune" "small")
+       (and (eq_attr "v8type" "udiv") (eq_attr "mode" "DI")))
+  "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_sdiv_single" 6
+  (and (eq_attr "tune" "small")
+       (and (eq_attr "v8type" "sdiv") (eq_attr "mode" "SI")))
+  "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_sdiv_double" 12
+  (and (eq_attr "tune" "small")
+       (and (eq_attr "v8type" "sdiv") (eq_attr "mode" "DI")))
+  "small_cpu_unit_i")
+
+
+;;-------------------------------------------------------
+;; Load/Store Instructions
+;;-------------------------------------------------------
+
+(define_insn_reservation "small_cpu_load1" 2
+  (and (eq_attr "tune" "small")
+       (eq_attr "v8type" "load_acq,load1"))
+  "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_store1" 0
+  (and (eq_attr "tune" "small")
+       (eq_attr "v8type" "store_rel,store1"))
+  "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_load2" 3
+  (and (eq_attr "tune" "small")
+       (eq_attr "v8type" "load2"))
+  "small_cpu_unit_i + small_cpu_unit_br, small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_store2" 0
+  (and (eq_attr "tune" "small")
+       (eq_attr "v8type" "store2"))
+  "small_cpu_unit_i + small_cpu_unit_br, small_cpu_unit_i")
+
+
+;;-------------------------------------------------------
+;; Branches
+;;-------------------------------------------------------
+
+;; Direct branches are the only instructions that can dual-issue.
+;; The latency here represents when the branch actually takes place.
+
+(define_insn_reservation "small_cpu_unit_br" 3
+  (and (eq_attr "tune" "small")
+       (eq_attr "v8type" "branch,call"))
+  "small_cpu_unit_br")
+
+
+;;-------------------------------------------------------
+;; Floating-point arithmetic.
+;;-------------------------------------------------------
+
+(define_insn_reservation "small_cpu_fpalu" 4
+  (and (eq_attr "tune" "small")
+       (eq_attr "v8type" "ffarith,fadd,fccmp,fcvt,fcmp"))
+  "small_cpu_unit_i + small_cpu_unit_fpadd_pipe")
+
+(define_insn_reservation "small_cpu_fconst" 3
+  (and (eq_attr "tune" "small")
+       (eq_attr "v8type" "fconst"))
+  "small_cpu_unit_i + small_cpu_unit_fpadd_pipe")
+
+(define_insn_reservation "small_cpu_fpmuls" 4
+  (and (eq_attr "tune" "small")
+       (and (eq_attr "v8type" "fmul") (eq_attr "mode" "SF")))
+  "small_cpu_unit_i + small_cpu_unit_fpmul_pipe")
+
+(define_insn_reservation "small_cpu_fpmuld" 7
+  (and (eq_attr "tune" "small")
+       (and (eq_attr "v8type" "fmul") (eq_attr "mode" "DF")))
+  "small_cpu_unit_i + small_cpu_unit_fpmul_pipe, small_cpu_unit_fpmul_pipe * 2,\
+   small_cpu_unit_i + small_cpu_unit_fpmul_pipe")
+
+
+;;-------------------------------------------------------
+;; Floating-point Division
+;;-------------------------------------------------------
+
+;; A single-precision divide takes 14 cycles to complete; this includes
+;; the time taken by the special instruction that collects the result as
+;; it travels down the multiply pipeline.
+
+(define_insn_reservation "small_cpu_fdivs" 14
+  (and (eq_attr "tune" "small")
+       (and (eq_attr "v8type" "fdiv,fsqrt") (eq_attr "mode" "SF")))
+  "small_cpu_unit_i, small_cpu_unit_fpdiv * 13")
+
+(define_insn_reservation "small_cpu_fdivd" 29
+  (and (eq_attr "tune" "small")
+       (and (eq_attr "v8type" "fdiv,fsqrt") (eq_attr "mode" "DF")))
+  "small_cpu_unit_i, small_cpu_unit_fpdiv * 28")
+
+
+;;-------------------------------------------------------
+;; Floating-point Transfers
+;;-------------------------------------------------------
+
+(define_insn_reservation "small_cpu_i2f" 4
+  (and (eq_attr "tune" "small")
+       (eq_attr "v8type" "fmovi2f"))
+  "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_f2i" 2
+  (and (eq_attr "tune" "small")
+       (eq_attr "v8type" "fmovf2i"))
+  "small_cpu_unit_i")
+
+
+;;-------------------------------------------------------
+;; Floating-point Load/Store
+;;-------------------------------------------------------
+
+(define_insn_reservation "small_cpu_floads" 4
+  (and (eq_attr "tune" "small")
+       (and (eq_attr "v8type" "fpsimd_load") (eq_attr "mode" "SF")))
+  "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_floadd" 5
+  (and (eq_attr "tune" "small")
+       (and (eq_attr "v8type" "fpsimd_load") (eq_attr "mode" "DF")))
+  "small_cpu_unit_i + small_cpu_unit_br, small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_fstores" 0
+  (and (eq_attr "tune" "small")
+       (and (eq_attr "v8type" "fpsimd_store") (eq_attr "mode" "SF")))
+  "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_fstored" 0
+  (and (eq_attr "tune" "small")
+       (and (eq_attr "v8type" "fpsimd_store") (eq_attr "mode" "DF")))
+  "small_cpu_unit_i + small_cpu_unit_br, small_cpu_unit_i")
+
+
+;;-------------------------------------------------------
+;; Bypasses
+;;-------------------------------------------------------
+
+;; Forwarding path for unshifted operands.
+
+(define_bypass 1 "small_cpu_alu, small_cpu_alu_shift" 
+  "small_cpu_alu, small_cpu_alu_shift, small_cpu_logic, small_cpu_logic_shift, small_cpu_shift")
+
+(define_bypass 1 "small_cpu_logic, small_cpu_logic_shift" 
+  "small_cpu_alu, small_cpu_alu_shift, small_cpu_logic, small_cpu_logic_shift, small_cpu_shift")
+
+(define_bypass 1 "small_cpu_shift" 
+  "small_cpu_alu, small_cpu_alu_shift, small_cpu_logic, small_cpu_logic_shift, small_cpu_shift")
+
+;; Load-to-use for floating-point values has a penalty of one cycle.
+
+(define_bypass 2 "small_cpu_floads"
+                 "small_cpu_fpalu, small_cpu_fpmuld,\
+                 small_cpu_fdivs, small_cpu_fdivd,\
+                 small_cpu_f2i")
+
+(define_bypass 3 "small_cpu_floadd"
+                 "small_cpu_fpalu, small_cpu_fpmuld,\
+                 small_cpu_fdivs, small_cpu_fdivd,\
+                 small_cpu_f2i")
diff --git a/gcc/config/aarch64/sync.md b/gcc/config/aarch64/sync.md
new file mode 100644 (file)
index 0000000..61f1f1b
--- /dev/null
@@ -0,0 +1,467 @@
+;; Machine description for AArch64 processor synchronization primitives.
+;; Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_c_enum "unspecv"
+ [
+    UNSPECV_SYNC_COMPARE_AND_SWAP       ; Represent a sync_compare_and_swap.
+    UNSPECV_SYNC_LOCK                  ; Represent a sync_lock_test_and_set.
+    UNSPECV_SYNC_LOCK_RELEASE          ; Represent a sync_lock_release.
+    UNSPECV_SYNC_OP                    ; Represent a sync_<op>
+    UNSPECV_SYNC_NEW_OP                        ; Represent a sync_new_<op>
+    UNSPECV_SYNC_OLD_OP                        ; Represent a sync_old_<op>
+])
+
+(define_expand "sync_compare_and_swap<mode>"
+  [(set (match_operand:ALLI 0 "register_operand")
+        (unspec_volatile:ALLI [(match_operand:ALLI 1 "memory_operand")
+                              (match_operand:ALLI 2 "register_operand")
+                              (match_operand:ALLI 3 "register_operand")]
+                              UNSPECV_SYNC_COMPARE_AND_SWAP))]
+  ""
+  {
+    struct aarch64_sync_generator generator;
+    generator.op = aarch64_sync_generator_omrn;
+    generator.u.omrn = gen_aarch64_sync_compare_and_swap<mode>;
+    aarch64_expand_sync (<MODE>mode, &generator, operands[0], operands[1],
+                        operands[2], operands[3]);
+    DONE;
+  })
+
+(define_expand "sync_lock_test_and_set<mode>"
+  [(match_operand:ALLI 0 "register_operand")
+   (match_operand:ALLI 1 "memory_operand")
+   (match_operand:ALLI 2 "register_operand")]
+  ""
+  {
+    struct aarch64_sync_generator generator;
+    generator.op = aarch64_sync_generator_omn;
+    generator.u.omn = gen_aarch64_sync_lock_test_and_set<mode>;
+    aarch64_expand_sync (<MODE>mode, &generator, operands[0], operands[1],
+                         NULL, operands[2]);
+    DONE;
+  })
+
+(define_expand "sync_<optab><mode>"
+  [(match_operand:ALLI 0 "memory_operand")
+   (match_operand:ALLI 1 "register_operand")
+   (syncop:ALLI (match_dup 0) (match_dup 1))]
+  ""
+  {
+    struct aarch64_sync_generator generator;
+    generator.op = aarch64_sync_generator_omn;
+    generator.u.omn = gen_aarch64_sync_new_<optab><mode>;
+    aarch64_expand_sync (<MODE>mode, &generator, NULL, operands[0], NULL,
+                         operands[1]);
+    DONE;
+  })
+
+(define_expand "sync_nand<mode>"
+  [(match_operand:ALLI 0 "memory_operand")
+   (match_operand:ALLI 1 "register_operand")
+   (not:ALLI (and:ALLI (match_dup 0) (match_dup 1)))]
+  ""
+  {
+    struct aarch64_sync_generator generator;
+    generator.op = aarch64_sync_generator_omn;
+    generator.u.omn = gen_aarch64_sync_new_nand<mode>;
+    aarch64_expand_sync (<MODE>mode, &generator, NULL, operands[0], NULL,
+                         operands[1]);
+    DONE;
+  })
+
+(define_expand "sync_new_<optab><mode>"
+  [(match_operand:ALLI 0 "register_operand")
+   (match_operand:ALLI 1 "memory_operand")
+   (match_operand:ALLI 2 "register_operand")
+   (syncop:ALLI (match_dup 1) (match_dup 2))]
+  ""
+  {
+    struct aarch64_sync_generator generator;
+    generator.op = aarch64_sync_generator_omn;
+    generator.u.omn = gen_aarch64_sync_new_<optab><mode>;
+    aarch64_expand_sync (<MODE>mode, &generator, operands[0], operands[1],
+                        NULL, operands[2]);
+    DONE;
+  })
+
+(define_expand "sync_new_nand<mode>"
+  [(match_operand:ALLI 0 "register_operand")
+   (match_operand:ALLI 1 "memory_operand")
+   (match_operand:ALLI 2 "register_operand")
+   (not:ALLI (and:ALLI (match_dup 1) (match_dup 2)))]
+  ""
+  {
+    struct aarch64_sync_generator generator;
+    generator.op = aarch64_sync_generator_omn;
+    generator.u.omn = gen_aarch64_sync_new_nand<mode>;
+    aarch64_expand_sync (<MODE>mode, &generator, operands[0], operands[1],
+                        NULL, operands[2]);
+    DONE;
+  })
+
+(define_expand "sync_old_<optab><mode>"
+  [(match_operand:ALLI 0 "register_operand")
+   (match_operand:ALLI 1 "memory_operand")
+   (match_operand:ALLI 2 "register_operand")
+   (syncop:ALLI (match_dup 1) (match_dup 2))]
+  ""
+  {
+    struct aarch64_sync_generator generator;
+    generator.op = aarch64_sync_generator_omn;
+    generator.u.omn = gen_aarch64_sync_old_<optab><mode>;
+    aarch64_expand_sync (<MODE>mode, &generator, operands[0], operands[1],
+                        NULL, operands[2]);
+    DONE;
+  })
+
+(define_expand "sync_old_nand<mode>"
+  [(match_operand:ALLI 0 "register_operand")
+   (match_operand:ALLI 1 "memory_operand")
+   (match_operand:ALLI 2 "register_operand")
+   (not:ALLI (and:ALLI (match_dup 1) (match_dup 2)))]
+  ""
+  {
+    struct aarch64_sync_generator generator;
+    generator.op = aarch64_sync_generator_omn;
+    generator.u.omn = gen_aarch64_sync_old_nand<mode>;
+    aarch64_expand_sync (<MODE>mode, &generator, operands[0], operands[1],
+                         NULL, operands[2]);
+    DONE;
+  })
+
+(define_expand "memory_barrier"
+  [(set (match_dup 0) (unspec:BLK [(match_dup 0)] UNSPEC_MB))]
+  ""
+{
+  operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+  MEM_VOLATILE_P (operands[0]) = 1;
+})
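+;; Descriptive note: the volatile BLK-mode MEM wrapping a scratch address
+;; stands for all of memory, so the barrier is treated as both reading and
+;; writing everything; the matching *memory_barrier insn below emits
+;; "dmb ish".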
+
+(define_insn "aarch64_sync_compare_and_swap<mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=&r")
+        (unspec_volatile:GPI
+         [(match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q")
+          (match_operand:GPI 2 "register_operand" "r")
+          (match_operand:GPI 3 "register_operand" "r")]
+         UNSPECV_SYNC_COMPARE_AND_SWAP))
+   (set (match_dup 1) (unspec_volatile:GPI [(match_dup 2)]
+                                          UNSPECV_SYNC_COMPARE_AND_SWAP))
+   (clobber:GPI (match_scratch:GPI 4 "=&r"))
+   (set (reg:CC CC_REGNUM) (unspec_volatile:CC [(match_dup 1)]
+                                                UNSPECV_SYNC_COMPARE_AND_SWAP))
+   ]
+  ""
+  {
+    return aarch64_output_sync_insn (insn, operands);
+  }
+  [(set_attr "sync_result"          "0")
+   (set_attr "sync_memory"          "1")
+   (set_attr "sync_required_value"  "2")
+   (set_attr "sync_new_value"       "3")
+   (set_attr "sync_t1"              "0")
+   (set_attr "sync_t2"              "4")
+   ])
+
+(define_insn "aarch64_sync_compare_and_swap<mode>"
+  [(set (match_operand:SI 0 "register_operand" "=&r")
+        (zero_extend:SI
+         (unspec_volatile:SHORT
+           [(match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q")
+            (match_operand:SI 2 "register_operand" "r")
+            (match_operand:SI 3 "register_operand" "r")]
+           UNSPECV_SYNC_COMPARE_AND_SWAP)))
+   (set (match_dup 1) (unspec_volatile:SHORT [(match_dup 2)]
+                                             UNSPECV_SYNC_COMPARE_AND_SWAP))
+   (clobber:SI (match_scratch:SI 4 "=&r"))
+   (set (reg:CC CC_REGNUM) (unspec_volatile:CC [(match_dup 1)]
+                                                UNSPECV_SYNC_COMPARE_AND_SWAP))
+   ]
+  ""
+  {
+    return aarch64_output_sync_insn (insn, operands);
+  }
+  [(set_attr "sync_result"          "0")
+   (set_attr "sync_memory"          "1")
+   (set_attr "sync_required_value"  "2")
+   (set_attr "sync_new_value"       "3")
+   (set_attr "sync_t1"              "0")
+   (set_attr "sync_t2"              "4")
+   ])
+
+(define_insn "aarch64_sync_lock_test_and_set<mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=&r")
+        (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q"))
+   (set (match_dup 1)
+        (unspec_volatile:GPI [(match_operand:GPI 2 "register_operand" "r")]
+                            UNSPECV_SYNC_LOCK))
+   (clobber (reg:CC CC_REGNUM))
+   (clobber (match_scratch:GPI 3 "=&r"))]
+  ""
+  {
+    return aarch64_output_sync_insn (insn, operands);
+  }
+  [(set_attr "sync_release_barrier" "no")
+   (set_attr "sync_result"          "0")
+   (set_attr "sync_memory"          "1")
+   (set_attr "sync_new_value"       "2")
+   (set_attr "sync_t1"              "0")
+   (set_attr "sync_t2"              "3")
+   ])
+
+(define_insn "aarch64_sync_lock_test_and_set<mode>"
+  [(set (match_operand:SI 0 "register_operand" "=&r")
+        (zero_extend:SI (match_operand:SHORT 1
+                         "aarch64_sync_memory_operand" "+Q")))
+   (set (match_dup 1)
+        (unspec_volatile:SHORT [(match_operand:SI 2 "register_operand" "r")]
+                               UNSPECV_SYNC_LOCK))
+   (clobber (reg:CC CC_REGNUM))
+   (clobber (match_scratch:SI 3 "=&r"))]
+  ""
+  {
+    return aarch64_output_sync_insn (insn, operands);
+  }
+  [(set_attr "sync_release_barrier" "no")
+   (set_attr "sync_result"          "0")
+   (set_attr "sync_memory"          "1")
+   (set_attr "sync_new_value"       "2")
+   (set_attr "sync_t1"              "0")
+   (set_attr "sync_t2"              "3")
+   ])
+
+(define_insn "aarch64_sync_new_<optab><mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=&r")
+        (unspec_volatile:GPI
+         [(syncop:GPI
+            (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q")
+             (match_operand:GPI 2 "register_operand" "r"))]
+           UNSPECV_SYNC_NEW_OP))
+   (set (match_dup 1)
+        (unspec_volatile:GPI [(match_dup 1) (match_dup 2)]
+                           UNSPECV_SYNC_NEW_OP))
+   (clobber (reg:CC CC_REGNUM))
+   (clobber (match_scratch:GPI 3 "=&r"))]
+  ""
+  {
+    return aarch64_output_sync_insn (insn, operands);
+  }
+  [(set_attr "sync_result"          "0")
+   (set_attr "sync_memory"          "1")
+   (set_attr "sync_new_value"       "2")
+   (set_attr "sync_t1"              "0")
+   (set_attr "sync_t2"              "3")
+   (set_attr "sync_op"              "<optab>")
+   ])
+
+(define_insn "aarch64_sync_new_nand<mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=&r")
+        (unspec_volatile:GPI
+         [(not:GPI (and:GPI
+                     (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q")
+                     (match_operand:GPI 2 "register_operand" "r")))]
+          UNSPECV_SYNC_NEW_OP))
+   (set (match_dup 1)
+        (unspec_volatile:GPI [(match_dup 1) (match_dup 2)]
+                           UNSPECV_SYNC_NEW_OP))
+   (clobber (reg:CC CC_REGNUM))
+   (clobber (match_scratch:GPI 3 "=&r"))]
+  ""
+  {
+    return aarch64_output_sync_insn (insn, operands);
+  }
+  [(set_attr "sync_result"          "0")
+   (set_attr "sync_memory"          "1")
+   (set_attr "sync_new_value"       "2")
+   (set_attr "sync_t1"              "0")
+   (set_attr "sync_t2"              "3")
+   (set_attr "sync_op"              "nand")
+   ])
+
+(define_insn "aarch64_sync_new_<optab><mode>"
+  [(set (match_operand:SI 0 "register_operand" "=&r")
+        (unspec_volatile:SI
+         [(syncop:SI
+             (zero_extend:SI
+              (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))
+               (match_operand:SI 2 "register_operand" "r"))]
+          UNSPECV_SYNC_NEW_OP))
+   (set (match_dup 1)
+        (unspec_volatile:SHORT [(match_dup 1) (match_dup 2)]
+                              UNSPECV_SYNC_NEW_OP))
+   (clobber (reg:CC CC_REGNUM))
+   (clobber (match_scratch:SI 3 "=&r"))]
+  ""
+  {
+    return aarch64_output_sync_insn (insn, operands);
+  }
+  [(set_attr "sync_result"          "0")
+   (set_attr "sync_memory"          "1")
+   (set_attr "sync_new_value"       "2")
+   (set_attr "sync_t1"              "0")
+   (set_attr "sync_t2"              "3")
+   (set_attr "sync_op"              "<optab>")
+   ])
+
+(define_insn "aarch64_sync_new_nand<mode>"
+  [(set (match_operand:SI 0 "register_operand" "=&r")
+        (unspec_volatile:SI
+         [(not:SI
+            (and:SI
+               (zero_extend:SI
+                (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))
+               (match_operand:SI 2 "register_operand" "r")))
+         ] UNSPECV_SYNC_NEW_OP))
+   (set (match_dup 1)
+        (unspec_volatile:SHORT [(match_dup 1) (match_dup 2)]
+                              UNSPECV_SYNC_NEW_OP))
+   (clobber (reg:CC CC_REGNUM))
+   (clobber (match_scratch:SI 3 "=&r"))]
+  ""
+  {
+    return aarch64_output_sync_insn (insn, operands);
+  }
+  [(set_attr "sync_result"          "0")
+   (set_attr "sync_memory"          "1")
+   (set_attr "sync_new_value"       "2")
+   (set_attr "sync_t1"              "0")
+   (set_attr "sync_t2"              "3")
+   (set_attr "sync_op"              "nand")
+   ])
+
+(define_insn "aarch64_sync_old_<optab><mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=&r")
+        (unspec_volatile:GPI
+          [(syncop:GPI
+             (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q")
+             (match_operand:GPI 2 "register_operand" "r"))]
+          UNSPECV_SYNC_OLD_OP))
+   (set (match_dup 1)
+        (unspec_volatile:GPI [(match_dup 1) (match_dup 2)]
+                            UNSPECV_SYNC_OLD_OP))
+   (clobber (reg:CC CC_REGNUM))
+   (clobber (match_scratch:GPI 3 "=&r"))
+   (clobber (match_scratch:GPI 4 "=&r"))]
+  ""
+  {
+    return aarch64_output_sync_insn (insn, operands);
+  }
+  [(set_attr "sync_result"          "0")
+   (set_attr "sync_memory"          "1")
+   (set_attr "sync_new_value"       "2")
+   (set_attr "sync_t1"              "3")
+   (set_attr "sync_t2"              "4")
+   (set_attr "sync_op"              "<optab>")
+   ])
+
+(define_insn "aarch64_sync_old_nand<mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=&r")
+        (unspec_volatile:GPI
+         [(not:GPI (and:GPI
+                     (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q")
+                     (match_operand:GPI 2 "register_operand" "r")))]
+          UNSPECV_SYNC_OLD_OP))
+   (set (match_dup 1)
+        (unspec_volatile:GPI [(match_dup 1) (match_dup 2)]
+                            UNSPECV_SYNC_OLD_OP))
+   (clobber (reg:CC CC_REGNUM))
+   (clobber (match_scratch:GPI 3 "=&r"))
+   (clobber (match_scratch:GPI 4 "=&r"))]
+  ""
+  {
+    return aarch64_output_sync_insn (insn, operands);
+  }
+  [(set_attr "sync_result"          "0")
+   (set_attr "sync_memory"          "1")
+   (set_attr "sync_new_value"       "2")
+   (set_attr "sync_t1"              "3")
+   (set_attr "sync_t2"              "4")
+   (set_attr "sync_op"              "nand")
+   ])
+
+(define_insn "aarch64_sync_old_<optab><mode>"
+  [(set (match_operand:SI 0 "register_operand" "=&r")
+        (unspec_volatile:SI
+         [(syncop:SI
+             (zero_extend:SI
+              (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))
+               (match_operand:SI 2 "register_operand" "r"))]
+           UNSPECV_SYNC_OLD_OP))
+   (set (match_dup 1)
+        (unspec_volatile:SHORT [(match_dup 1) (match_dup 2)]
+                              UNSPECV_SYNC_OLD_OP))
+   (clobber (reg:CC CC_REGNUM))
+   (clobber (match_scratch:SI 3 "=&r"))
+   (clobber (match_scratch:SI 4 "=&r"))]
+  ""
+  {
+    return aarch64_output_sync_insn (insn, operands);
+  }
+  [(set_attr "sync_result"          "0")
+   (set_attr "sync_memory"          "1")
+   (set_attr "sync_new_value"       "2")
+   (set_attr "sync_t1"              "3")
+   (set_attr "sync_t2"              "4")
+   (set_attr "sync_op"              "<optab>")
+   ])
+
+(define_insn "aarch64_sync_old_nand<mode>"
+  [(set (match_operand:SI 0 "register_operand" "=&r")
+        (unspec_volatile:SI
+         [(not:SI
+            (and:SI
+               (zero_extend:SI
+                (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))
+                 (match_operand:SI 2 "register_operand" "r")))]
+          UNSPECV_SYNC_OLD_OP))
+   (set (match_dup 1)
+        (unspec_volatile:SHORT [(match_dup 1) (match_dup 2)]
+                              UNSPECV_SYNC_OLD_OP))
+   (clobber (reg:CC CC_REGNUM))
+   (clobber (match_scratch:SI 3 "=&r"))
+   (clobber (match_scratch:SI 4 "=&r"))]
+  ""
+  {
+    return aarch64_output_sync_insn (insn, operands);
+  }
+  [(set_attr "sync_result"          "0")
+   (set_attr "sync_memory"          "1")
+   (set_attr "sync_new_value"       "2")
+   (set_attr "sync_t1"              "3")
+   (set_attr "sync_t2"              "4")
+   (set_attr "sync_op"              "nand")
+   ])
+
+(define_insn "*memory_barrier"
+  [(set (match_operand:BLK 0 "" "")
+       (unspec:BLK [(match_dup 0)] UNSPEC_MB))]
+  ""
+  "dmb\\tish"
+)
+
+(define_insn "sync_lock_release<mode>"
+  [(set (match_operand:ALLI 0 "memory_operand" "+Q")
+       (unspec_volatile:ALLI [(match_operand:ALLI 1 "register_operand" "r")]
+                             UNSPECV_SYNC_LOCK_RELEASE))]
+  ""
+  {
+    return aarch64_output_sync_lock_release (operands[1], operands[0]);
+  })
+
diff --git a/gcc/config/aarch64/t-aarch64 b/gcc/config/aarch64/t-aarch64
new file mode 100644 (file)
index 0000000..715ad1d
--- /dev/null
@@ -0,0 +1,32 @@
+# Machine description for AArch64 architecture.
+#  Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+#  Contributed by ARM Ltd.
+#
+#  This file is part of GCC.
+#
+#  GCC is free software; you can redistribute it and/or modify it
+#  under the terms of the GNU General Public License as published by
+#  the Free Software Foundation; either version 3, or (at your option)
+#  any later version.
+#
+#  GCC is distributed in the hope that it will be useful, but
+#  WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#  General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with GCC; see the file COPYING3.  If not see
+#  <http://www.gnu.org/licenses/>.
+
+$(srcdir)/config/aarch64/aarch64-tune.md: $(srcdir)/config/aarch64/gentune.sh \
+       $(srcdir)/config/aarch64/aarch64-cores.def
+       $(SHELL) $(srcdir)/config/aarch64/gentune.sh \
+               $(srcdir)/config/aarch64/aarch64-cores.def > \
+               $(srcdir)/config/aarch64/aarch64-tune.md
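+# Note (assumed behaviour): gentune.sh emits a single "tune" attribute
+# listing the cores from aarch64-cores.def, which the pipeline
+# descriptions test via (eq_attr "tune" ...).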
+
+aarch64-builtins.o: $(srcdir)/config/aarch64/aarch64-builtins.c $(CONFIG_H) \
+  $(SYSTEM_H) coretypes.h $(TM_H) \
+  $(RTL_H) $(TREE_H) expr.h $(TM_P_H) $(RECOG_H) langhooks.h \
+  $(DIAGNOSTIC_CORE_H) $(OPTABS_H)
+       $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+               $(srcdir)/config/aarch64/aarch64-builtins.c
diff --git a/gcc/config/aarch64/t-aarch64-linux b/gcc/config/aarch64/t-aarch64-linux
new file mode 100644 (file)
index 0000000..f6ec576
--- /dev/null
@@ -0,0 +1,22 @@
+# Machine description for AArch64 architecture.
+#  Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+#  Contributed by ARM Ltd.
+#
+#  This file is part of GCC.
+#
+#  GCC is free software; you can redistribute it and/or modify it
+#  under the terms of the GNU General Public License as published by
+#  the Free Software Foundation; either version 3, or (at your option)
+#  any later version.
+#
+#  GCC is distributed in the hope that it will be useful, but
+#  WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#  General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with GCC; see the file COPYING3.  If not see
+#  <http://www.gnu.org/licenses/>.
+
+LIB1ASMSRC   = aarch64/lib1funcs.asm
+LIB1ASMFUNCS = _aarch64_sync_cache_range